ext | sha | content |
---|---|---|
py | 1a48ddf9a2faad4f9ebf37fb9bba46d7a299591e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# scvi documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
import scvi
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "2.0" # Nicer param docs
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"nbsphinx",
"nbsphinx_link",
"sphinx_autodoc_typehints",
"sphinx.ext.mathjax",
"sphinx_rtd_theme",
"sphinx.ext.intersphinx",
"autodocsumm",
]
# nbsphinx specific settings
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
autodoc_default_options = {"autosummary": True}
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"scVI"
copyright = u"2020, Romain Lopez, Adam Gayoso, Pierre Boyeau"
author = u"Romain Lopez"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = scvi.__version__
# The full version, including alpha/beta/rc tags.
release = scvi.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_show_sphinx = False
def setup(app):
app.add_stylesheet("css/custom.css")
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "scvidoc"
mathjax_config = {
"extensions": ["tex2jax.js"],
"jax": ["input/TeX", "output/HTML-CSS"],
"tex2jax": {
"inlineMath": [["$", "$"], ["\\(", "\\)"]],
"displayMath": [["$$", "$$"], ["\\[", "\\]"]],
"processEscapes": True,
},
}
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, "scvi.tex", u"scVI Documentation", u"Romain Lopez", "manual")
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "scvi", u"scVI Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"scvi",
u"scVI Documentation",
author,
"scvi",
"One line description of project.",
"Miscellaneous",
)
]
|
py | 1a48df38fb374e767d0ec1b82f45a59a4a853b03 | """
Cree-specific data for English Inflected Phrase search
"""
from morphodict.analysis.tag_map import TagMap
# tags needed for FST generator
crk_noun_tags = ["+N", "+A", "+I", "+D"]
# TagMaps can’t automatically pass through unknown tags because they wouldn’t
# know how to order the unknown tags amongst the defined precedence classes. For
# example, when populating a default tense tag of +Ind, it has to put that after
# +V+TA and before +1Sg, so it needs precedence values for all of those tags.
#
# So we list everything.
noun_passthrough_tags = {
0: [
# word class
"+N",
"+A",
"+I",
"+D",
],
2: [
# N: Possessives
"+Px1Sg",
"+Px2Sg",
"+Px3Sg",
"+Px1Pl",
"+Px2Pl",
"+Px12Pl",
"+Px3Pl",
"+Px4Sg/Pl",
"+PxX",
],
3: [
# N: number
"+Sg",
"+Pl",
"+Obv",
"+Loc",
"+Distr",
],
}
verb_passthrough_tags = {
0: [
# word class
"+V",
"+TA",
"+AI",
"+II",
"+TI",
],
2: [
# V: Person - subject
"+1Sg",
"+2Sg",
"+3Sg",
"+1Pl",
"+12Pl",
"+2Pl",
"+3Pl",
"+4Sg/Pl",
"+5Sg/Pl",
"+X",
],
3: [
# V: Person - object
"+1SgO",
"+2SgO",
"+3SgO",
"+1PlO",
"+21PlO",
"+2PlO",
"+3PlO",
"+4Pl",
"+4Sg",
"+4Sg/PlO",
"+5Sg/PlO",
"+XO",
],
}
def passthrough_tags_to_tuples(passthrough_tags):
return (
(tag, tag, precedence)
for precedence, tag_list in passthrough_tags.items()
for tag in tag_list
)
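# A minimal sketch (illustration only, not used by the module): the helper maps each
# tag to itself at its class's precedence, e.g.
#   list(passthrough_tags_to_tuples({3: ["+Sg", "+Pl"]}))
#   -> [("+Sg", "+Sg", 3), ("+Pl", "+Pl", 3)]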
verb_tag_map = TagMap(
# Tense
("+Prt", ("PV/ki+", "+Ind"), 1), # Preterite aka simple past
("+Cond", ("+Fut", "+Cond"), 1), # Future conditional
("+Imm", ("+Imp", "+Imm"), 1), # Immediate imperative
("+Del", ("+Imp", "+Del"), 1), # Delayed imperative
("+Fut", ("PV/wi+", "+Ind"), 1), # Future
# TODO: also handle ("+Fut", "PV/wi+", 1) # Also accept PV/wi without independent as future?
    # Note that these crk features are disjoint, but both are needed for the eng feature
("+Def", ("PV/ka+", "+Ind"), 1),
("+Inf", ("PV/ka+", "+Cnj"), 1),
(TagMap.DEFAULT, "+Ind", 1),
# Person - see https://github.com/UAlbertaALTLab/cree-intelligent-dictionary/issues/891
("+0Sg", "+3Sg", 2),
# Person - object
("+0SgO", (), 3),
# TODO: also handle "+Inf": ("PV/ta+", "+Cnj") # future definite?
*passthrough_tags_to_tuples(verb_passthrough_tags)
)
noun_tag_map = TagMap(
("+Dim", "+Der/Dim", 2), *passthrough_tags_to_tuples(noun_passthrough_tags)
)
|
py | 1a48e01df7c19233df97c6eb9c06c93ad32fddf0 | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
import logging
from logging.handlers import RotatingFileHandler
import os
#from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)  # SQLAlchemy object representing the database; created by passing it the application
login = LoginManager(app)
login.login_view = 'login' #if user tries to access a page with the @login decorator, the decorator will intercept the request and send the user to the login page.
#migrate = Migrate(app, db) - i skipped this session
if not app.debug:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/microblog.log', maxBytes=10240000,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
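# The imports below sit at the bottom of the module (a standard Flask pattern), typically
# because routes, models and errors themselves import `app` from this package; importing
# them any earlier would create a circular import.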
from app import routes, models, errors |
py | 1a48e04fde5f3ebf55c9f99fdfe2965540976d33 | n1 = float(input('digite um nr: '))
n2 = float(input('digite another nr: '))
soma = n1 + n2
print('A soma entre {:.1f} and {:.1f} eh igual a {:.1f}.'.format(n1,n2,soma)) |
py | 1a48e119f09ccb31060cd018eb71ea0db4cf45a4 | import cv2,os,PIL
import numpy as np
from keras.applications.vgg16 import decode_predictions
from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19
from keras.preprocessing import image as Image
from keras.applications.vgg16 import preprocess_input
from tqdm import tqdm
from skimage import feature
from keras.models import Model
import math
import pandas as pd
from scipy.cluster.vq import kmeans
from scipy.cluster.vq import whiten
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="nri_uniform")
#print(list(lbp.ravel()))
#print(set(list(lbp.ravel())))
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints*(self.numPoints-1) + 3),
range=(0, self.numPoints*(self.numPoints-1) + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
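# A minimal usage sketch (illustration only; the image path is a placeholder):
#   gray = cv2.cvtColor(cv2.imread("some_image.jpg"), cv2.COLOR_BGR2GRAY)
#   desc = LocalBinaryPatterns(8, 1.0)
#   hist = desc.describe(gray)  # normalized histogram of "nri_uniform" LBP codes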
def image_feature_extraction(x, model):
data = preprocess_input(x)
layer_model = Model(inputs=model.input, outputs=model.layers[-2].output)
features = layer_model.predict(data)
print(im)
print(features.shape)
# print(features)
return features
def image_tags_extraction(x, model):
data = preprocess_input(x)
yhat = model.predict(data)
labels = decode_predictions(yhat, top=10)[0]
print(labels)
return labels
def image_colorfulness(image):
# split the image into its respective RGB components
(B, G, R) = cv2.split(image.astype("float"))
# compute rg = R - G
rg = np.absolute(R - G)
# compute yb = 0.5 * (R + G) - B
yb = np.absolute(0.5 * (R + G) - B)
# compute the mean and standard deviation of both `rg` and `yb`
(rbMean, rbStd) = (np.mean(rg), np.std(rg))
(ybMean, ybStd) = (np.mean(yb), np.std(yb))
# combine the mean and standard deviations
stdRoot = np.sqrt((rbStd ** 2) + (ybStd ** 2))
meanRoot = np.sqrt((rbMean ** 2) + (ybMean ** 2))
# derive the "colorfulness" metric and return it
return stdRoot + (0.3 * meanRoot)
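# Quick sanity check (illustrative): a uniform grey image has rg == yb == 0 everywhere,
# so image_colorfulness(np.full((10, 10, 3), 128, dtype=np.uint8)) returns 0.0.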
def im_tags_embedding(labels, embedding_vector):
#print(labels[0])
words = []
for label in labels:
word = label[1]
#print(word)
words.append(word)
tags_matrix = []
zero_array = np.zeros(300)
for tag in words:
if tag in embedding_vector.keys():
tag_embedding = embedding_vector[tag]
tags_matrix.append(np.array(tag_embedding))
zero_array = zero_array+np.array(tag_embedding)
tag_feature = zero_array / len(tags_matrix)
return list(tag_feature)
def im_color_hist(im):
chans = cv2.split(im)
colors = ("b", "g", "r")
features = []
for (chan, color) in zip(chans, colors):
average_value = chan.mean()/256
features.append(average_value.item())
'''
hist = cv2.calcHist([chan],[0], None, [256], [0,255])
np.array(hist).flatten()
hist.resize(hist.size)
hist = list(hist/np.sum(hist))
features.extend(hist)
'''
return features
def dominant_color_rgb(image):
r = []
g = []
b = []
for line in image:
for pixel in line:
temp_r, temp_g, temp_b = pixel
r.append(temp_r)
g.append(temp_g)
b.append(temp_b)
df = pd.DataFrame({'red':r,'blue':b,'green':g})
df['scaled_red'] = whiten(df['red'])
df['scaled_blue'] = whiten(df['blue'])
df['scaled_green'] = whiten(df['green'])
cluster_center, distortion = kmeans(df[['scaled_red','scaled_green','scaled_blue']],1)
#print(cluster_center)
return cluster_center
def embedding_load(embedding_path):
embedding_vector = {}
f = open(embedding_path,'r',encoding='utf8')
for line in tqdm(f):
value = line.split(' ')
word = value[0]
coef = np.array(value[1:], dtype='float32')
embedding_vector[word] = coef
f.close()
return embedding_vector
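# The expected file format (an assumption inferred from the parsing above) is one token
# per line followed by its embedding values separated by spaces, GloVe-style, e.g.
#   cat 0.12 -0.03 ... 0.45
# with 300 dimensions per vector here, as suggested by the 'embedding_300d.txt' filename.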
if __name__ == '__main__':
model_names = ['VGG16','VGG19','ResNet50','InceptionV3','Xception']
dataset_name = 'dataset' #'silver_negative' 'silver_positive'
if dataset_name == 'dataset':
im_path = 'data/dataset_images/dataset_image/'
elif dataset_name == 'silver_negative':
im_path = 'data/silver_negative/'
elif dataset_name == 'silver_positive':
im_path = 'data/silver_positive/'
embedding_vector = embedding_load('embedding_300d.txt')
lbp_feature_dict = {}
other_feature_dict = {}
tags_embedding_feature_dict = {}
#last_layer_feature_dict = {}
#color_hist_feature_dict = {}
for model_name in model_names:
out_tag_file = open(dataset_name+'_'+ model_name + '_image_tags.txt', 'w', encoding='utf8')
deep_learning_feature_file_name = 'feature_data/' + dataset_name + '_'+ model_name +'_image_tag_feature.npy'
if model_name == 'VGG16':
model = VGG16(weights='imagenet', include_top=True)
im_size = 224
elif model_name == 'VGG19':
model = VGG19(weights='imagenet', include_top=True)
im_size = 224
elif model_name == 'ResNet50':
model = ResNet50(weights='imagenet', include_top=True)
im_size = 224
elif model_name == 'InceptionV3':
model = InceptionV3(weights='imagenet', include_top=True)
im_size = 299
elif model_name == 'Xception':
model = Xception(weights='imagenet', include_top=True)
im_size = 299
#print(model.summary())
for im in os.listdir(im_path):
print(im)
try:
img = Image.load_img(im_path + im, target_size=(im_size, im_size))
except:
print(im_path + im)
continue
x = Image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# im_last_layer_feature = image_feature_extraction(x, model)
# print('im_last_layer_feature length ', len(im_last_layer_feature))
image_tags = image_tags_extraction(x, model)
tags = ''
for tag in image_tags:
# print(tag[1])
tags = tags + tag[1] + ' '
print(im + '\t' + tags + '\n')
out_tag_file.write(im + '\t' + tags + '\n')
tags_embedding_feature = im_tags_embedding(image_tags, embedding_vector)
tags_embedding_feature_dict[im] = tags_embedding_feature
np.save(deep_learning_feature_file_name, tags_embedding_feature_dict)
out_tag_file.close()
for im in os.listdir(im_path):
print(im)
im_size = os.path.getsize(im_path+im)
print('im_size:', im_size)
image = cv2.imread(im_path+im)
try:
dominant_color = dominant_color_rgb(image)
print('dominant_color:', dominant_color[0])
except:
dominant_color = np.array([[0,0,0]])
colorfulness = image_colorfulness(image)
print('colorfulness:', colorfulness)
sp = image.shape
high = sp[0]
width = sp[1]
print('sp',sp[2])
total = 0
b = 45
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#print(gray)
arr = np.array(image)
for h in range(arr.shape[0]):
for w in range(arr.shape[1]):
total += (arr[(h,w,0)]-b)*(arr[(h,w,0)]-b)
        contast = total / (high * width)
print(contast)
if contast>0:
contrast = math.sqrt(contast)
else:
contast = contast*(-1)
contrast = math.sqrt(contast)*(-1)
print('contrast:', contrast)
desc = LocalBinaryPatterns(8, 1.0) # 59
hist_LBP = desc.describe(gray) #
print(hist_LBP)
#color_hist = im_color_hist(image) # 768
#color_hist.append(h)
#color_hist.append(w)
lbp_feature_dict[im] = list(hist_LBP)
other_feature_dict[im] = [im_size/1000, high, width, colorfulness, contrast/1000]+list(dominant_color[0])
#print([im_size/1000, high, width, colorfulness, contrast/1000]+list(dominant_color[0]))
#color_hist_feature_dict[im] = color_hist
#last_layer_feature_dict[im] = im_last_layer_feature
np.save('feature_data/' + dataset_name+'_image_LBP_feature.npy', lbp_feature_dict)
np.save('feature_data/' + dataset_name+'_image_other_feature.npy', other_feature_dict)
#np.save(dataset_name+'_image_color_feature.npy', color_hist_feature_dict)
|
py | 1a48e1350b59d5c3f0fd40b9dc5c4a41db10459a | __all__ = ["ICOS"]
class ICOS:
""" Interface for processing ICOS data
"""
def __init__(self):
# Sampling period of ICOS data in seconds
self._sampling_period = "NA"
def read_file(self, data_filepath, site=None, network=None):
""" Reads ICOS data files and returns the UUIDS of the Datasources
the processed data has been assigned to
Args:
data_filepath (str or Path): Path of file to load
Returns:
            dict: Dictionary of processed gas data, keyed by species
"""
from pathlib import Path
from HUGS.Processing import assign_attributes
data_filepath = Path(data_filepath)
source_name = data_filepath.stem
if site is None:
site = source_name.split(".")[0]
species = source_name.split(".")[1]
# This should return xarray Datasets
gas_data = self.read_data(data_filepath=data_filepath, species=species, site=site)
        # Assigning attributes to the xarray Datasets here makes it a lot easier to test
gas_data = assign_attributes(data=gas_data, site=site, sampling_period=self._sampling_period)
return gas_data
def read_data(self, data_filepath, species, site=None):
""" Separates the gases stored in the dataframe in
separate dataframes and returns a dictionary of gases
with an assigned UUID as gas:UUID and a list of the processed
dataframes
TODO - update this to process multiple species here?
Args:
data_filepath (pathlib.Path): Path of datafile
species (str): Species to process
Returns:
dict: Dictionary containing attributes, data and metadata keys
"""
from pandas import read_csv, Timestamp
import numpy as np
from HUGS.Util import read_header
# metadata = read_metadata(filepath=data_filepath, data=data, data_type="ICOS")
header = read_header(filepath=data_filepath)
n_skip = len(header) - 1
species = "co2"
def date_parser(year, month, day, hour, minute):
return Timestamp(year, month, day, hour, minute)
datetime_columns = {"time": ["Year", "Month", "Day", "Hour", "Minute"]}
use_cols = [
"Year",
"Month",
"Day",
"Hour",
"Minute",
str(species.lower()),
"Stdev",
"NbPoints",
]
dtypes = {
"Day": np.int,
"Month": np.int,
"Year": np.int,
"Hour": np.int,
"Minute": np.int,
species.lower(): np.float,
"Stdev": np.float,
"SamplingHeight": np.float,
"NbPoints": np.int,
}
data = read_csv(
data_filepath,
skiprows=n_skip,
parse_dates=datetime_columns,
index_col="time",
sep=" ",
usecols=use_cols,
dtype=dtypes,
na_values="-999.99",
date_parser=date_parser,
)
data = data[data[species.lower()] >= 0.0]
# Drop duplicate indices
data = data.loc[~data.index.duplicated(keep="first")]
# Check if the index is sorted
if not data.index.is_monotonic_increasing:
            data = data.sort_index()
rename_dict = {
"Stdev": species + " variability",
"NbPoints": species + " number_of_observations",
}
data = data.rename(columns=rename_dict)
        # Convert to xarray Dataset
data = data.to_xarray()
combined_data = {}
site_attributes = {}
# Read some metadata from the filename
split_filename = data_filepath.name.split(".")
try:
site = split_filename[0]
time_resolution = split_filename[2]
inlet_height = split_filename[3]
        except IndexError:
raise ValueError("Unable to read metadata from filename. We expect a filename such as tta.co2.1minute.222m.dat")
metadata = {
"site": site,
"species": species,
"inlet": inlet_height,
"time_resolution": time_resolution,
"network": "ICOS",
}
combined_data[species] = {
"metadata": metadata,
"data": data,
"attributes": site_attributes,
}
return combined_data
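# A minimal usage sketch (illustrative; the path is a placeholder and the filename
# follows the pattern the parser expects, e.g. "tta.co2.1minute.222m.dat"):
#   processor = ICOS()
#   gas_data = processor.read_file("/path/to/tta.co2.1minute.222m.dat")
#   gas_data["co2"]["data"]      # xarray Dataset with the CO2 time series
#   gas_data["co2"]["metadata"]  # e.g. {"site": "tta", "species": "co2", ...}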
|
py | 1a48e3316f99801329638b73c216ee4a433c2e45 | import uuid
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
from tests.common import both_channels, metadata, raise_on_failure
@both_channels
def test_concept_post_get_patch(channel):
stub = service_pb2_grpc.V2Stub(channel)
random_string = uuid.uuid4().hex[:15]
random_concept_id = "concept-id-" + random_string
random_concept_name = "concept-name-的な-" + random_string
post_concepts_response = stub.PostConcepts(
service_pb2.PostConceptsRequest(
concepts=[resources_pb2.Concept(id=random_concept_id, name=random_concept_name)]
),
metadata=metadata(),
)
raise_on_failure(post_concepts_response)
get_concepts_response = stub.GetConcept(
service_pb2.GetConceptRequest(concept_id=random_concept_id), metadata=metadata()
)
raise_on_failure(get_concepts_response)
assert get_concepts_response.concept.id == random_concept_id
assert get_concepts_response.concept.name == random_concept_name
duplicated_post_concepts_response = stub.PostConcepts(
service_pb2.PostConceptsRequest(
concepts=[
resources_pb2.Concept(
id=random_concept_id,
)
]
),
metadata=metadata(),
)
assert (
duplicated_post_concepts_response.status.code
== status_code_pb2.StatusCode.CONCEPTS_INVALID_REQUEST
)
assert duplicated_post_concepts_response.status.description == "Invalid request"
assert "duplicate" in duplicated_post_concepts_response.status.details.lower()
post_concepts_searches_response = stub.PostConceptsSearches(
service_pb2.PostConceptsSearchesRequest(
concept_query=resources_pb2.ConceptQuery(name=random_concept_name)
),
metadata=metadata(),
)
raise_on_failure(post_concepts_searches_response)
assert random_concept_name in post_concepts_searches_response.concepts[0].name
patch_concepts_response = stub.PatchConcepts(
service_pb2.PatchConceptsRequest(
action="overwrite",
concepts=[resources_pb2.Concept(id=random_concept_id, name="some new concept name")],
),
metadata=metadata(),
)
raise_on_failure(patch_concepts_response)
@both_channels
def test_patching_public_concept_fails(channel):
stub = service_pb2_grpc.V2Stub(channel)
patch_concepts_searches_response = stub.PatchConcepts(
service_pb2.PatchConceptsRequest(
action="overwrite",
concepts=[
resources_pb2.Concept(
id="ai_98Xb0K3q", # The ID of a public concept.
name="this new name won't be applied",
)
],
),
metadata=metadata(),
)
assert (
patch_concepts_searches_response.status.code
== status_code_pb2.StatusCode.CONN_DOES_NOT_EXIST
)
assert patch_concepts_searches_response.status.description == "Resource does not exist"
|
py | 1a48e4cab478cfe2c0d8958ef7485fff879f3eaa | import bpy
import os
D = bpy.data
"""
This little script shows, in the Blender console, the texture paths
that do not conform to the reference one.
"""
searchPath = r"partOfTextureName"
print("+++ search paths +++")
for img in D.images:
if img.filepath.endswith(searchPath) or img.name.endswith(searchPath):
print("found {} {}".format(img.name, img.filepath))
print("---")
for mat in bpy.data.materials:
if mat.node_tree is not None and len(mat.node_tree.nodes) > 0:
nodes = mat.node_tree.nodes
for node in nodes:
if type(node).__name__ == "ShaderNodeTexImage":
for out in node.outputs:
text_img = node.image
if text_img is not None and searchPath in text_img.filepath:
print("found on: {} > {} {}".format(mat.name, text_img.name, text_img.filepath)) |
py | 1a48e544c0134ea3659b56bf37f9cf456d99f322 | # This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.
from contextlib import contextmanager
import math
from six import BytesIO
from nose.tools import assert_raises
from zs import ZS, ZSWriter, ZSError
from zs.common import write_length_prefixed
from .util import tempname
# some of these helpers also used in test_cmdline to test 'make'
# Each of these records is 25 bytes long
records = []
# just in case of bugs, let's make sure to have an empty record
records.append(b"")
for i in range(1000):
records.append((u"THIS IS RECORD # %08i" % (i,)).encode("utf-8"))
# and a duplicate record
records.append(b"ZZZ THIS RECORD IS REPEATED")
records.append(b"ZZZ THIS RECORD IS REPEATED")
def ok_zs(p):
z = ZS(p)
z.validate()
return z
def temp_zs_path():
return tempname(".zs", unlink_first=True)
@contextmanager
def temp_writer(**kwargs):
with temp_zs_path() as p:
kwargs.setdefault("metadata", {})
kwargs.setdefault("branching_factor", 2)
with ZSWriter(p, **kwargs) as zw:
yield (p, zw)
def identity(x):
return x
def test_add_data_block():
with temp_writer() as (p, zw):
zw.add_data_block([b"a", b"b"])
zw.add_data_block([b"c", b"z"])
zw.finish()
with ok_zs(p) as z:
z.validate()
assert list(z.block_map(identity)) == [[b"a", b"b"],
[b"c", b"z"]]
def test_write_add_file_contents_terminator():
for terminator in [b"\n", b"\x00", b"\r\n"]:
f = BytesIO(terminator.join(records) + terminator)
with temp_writer() as (p, zw):
kwargs = {}
if terminator != b"\n":
kwargs["terminator"] = terminator
# approximately 4 records per data block
zw.add_file_contents(f, 100, **kwargs)
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
assert len(list(z.block_map(identity))) > len(records) / 5.0
def test_write_add_file_contents_length_prefixed():
for mode in ["uleb128", "u64le"]:
f = BytesIO()
write_length_prefixed(f, records, mode)
with temp_writer() as (p, zw):
# approximately 4 records per data block
zw.add_file_contents(BytesIO(f.getvalue()), 100,
length_prefixed=mode)
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
assert len(list(z.block_map(identity))) > len(records) / 5.0
def test_write_mixed():
with temp_writer() as (p, zw):
zw.add_data_block([b"a", b"b"])
f = BytesIO(b"c\nd\n")
zw.add_file_contents(f, 10)
zw.add_data_block([b"e", b"f"])
f = BytesIO(b"\x01g\x01h")
zw.add_file_contents(f, 10, length_prefixed="uleb128")
zw.finish()
with ok_zs(p) as z:
assert list(z) == [b"a", b"b", b"c", b"d", b"e", b"f", b"g", b"h"]
def test_writer_args():
with temp_zs_path() as p:
zw = ZSWriter(p, {"a": 1}, 2, parallelism=2, codec="deflate",
codec_kwargs={"compress_level": 3},
show_spinner=False, include_default_metadata=False)
try:
zw.add_data_block([b"a", b"b"])
zw.add_data_block([b"c", b"d"])
zw.finish()
finally:
zw.close()
with ok_zs(p) as z:
assert z.metadata == {"a": 1}
assert z.codec == "deflate"
def test_no_overwrite():
with temp_zs_path() as p:
f = open(p, "wb")
f.write(b"hi\n")
f.close()
assert_raises(ZSError, ZSWriter, p, {}, 2)
def test_bad_codec():
with temp_zs_path() as p:
assert_raises(ZSError, ZSWriter, p, {}, 2, codec="SUPERZIP")
def test_trailing_record():
with temp_writer() as (p, zw):
assert_raises(ZSError, zw.add_file_contents,
BytesIO(b"a\nb\nc"), 2)
def test_from_file_terminator_long_record():
with temp_writer() as (p, zw):
# individual records longer than the approx_block_size
records = [b"a" * 100, b"b" * 100]
f = BytesIO(b"\n".join(records + [b""]))
zw.add_file_contents(f, 10)
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
def test_from_file_length_prefixed_exactly_one_block():
with temp_writer() as (p, zw):
zw.add_file_contents(BytesIO(b"\x08aaaaaaaa\x04bbbb"), 10,
length_prefixed="uleb128")
zw.finish()
with ok_zs(p) as z:
assert list(z) == [b"a" * 8, b"b" * 4]
def test_closed_is_closed():
with temp_writer() as (_, zw):
zw.close()
assert_raises(ZSError, zw.add_file_contents, BytesIO(b""), 100)
assert_raises(ZSError, zw.add_data_block, [b""])
assert_raises(ZSError, zw.finish)
def test_empty():
with temp_writer() as (_, zw):
assert_raises(ZSError, zw.finish)
# empty blocks are silently dropped instead of being added
def test_no_empty_blocks():
with temp_writer() as (p, zw):
zw.add_data_block([b"a", b"b"])
zw.add_data_block([])
zw.add_file_contents(BytesIO(), 100)
zw.add_data_block([b"c", b"d"])
zw.finish()
# the implicit call to z.validate() here should error out if there are
# any empty blocks, but let's check anyway.
with ok_zs(p) as z:
assert len(list(z.block_map(identity))) == 2
def test_unsorted():
with temp_writer() as (_, zw):
with assert_raises(ZSError):
zw.add_file_contents(BytesIO(b"b\na\n"), 100)
zw.finish()
assert zw.closed
with temp_writer() as (_, zw):
with assert_raises(ZSError):
zw.add_data_block([b"b", b"a"])
zw.finish()
assert zw.closed
with temp_writer() as (_, zw):
with assert_raises(ZSError):
zw.add_data_block([b"m", b"n"])
zw.add_data_block([b"a", b"b"])
zw.finish()
assert zw.closed
def test_lengths():
# exercise all the corner cases in the index packing code
for num_blocks in range(1, 2 ** 5):
for branching_factor in [2, 3]:
block_tmpls = [(u"%04i" % (i,)).encode("utf-8")
for i in range(num_blocks)]
records = []
with temp_writer(branching_factor=branching_factor) as (p, zw):
for block_tmpl in block_tmpls:
block = [block_tmpl + suffix
for suffix in [b"a", b"b", b"c"]]
zw.add_data_block(block)
records += block
zw.finish()
with ok_zs(p) as z:
assert list(z) == records
assert (max(math.ceil(math.log(num_blocks)
/ math.log(branching_factor)),
1)
== z.root_index_level)
def test_clogged_queue():
# Failure to sort across blocks causes an error in the write worker, which
# then stops consuming from its queue. But we don't see it immediately,
# because the main process doesn't regularly check for errors. Eventually
# this causes the whole pipeline to stall. This tests that the main
# process eventually checks for errors under these conditions.
with temp_writer() as (p, zw):
zw.add_data_block([b"z"])
with assert_raises(ZSError):
while True:
zw.add_data_block([b"a"])
# Regression test: had a bug where an empty terminated chunk would cause
# alloc_hint=0 and trigger an infinite loop in pack_data_records.
def test_short_file():
with temp_writer() as (p, zw):
zw.add_file_contents(BytesIO(b"\n"), 128 * 2 ** 10)
zw.finish()
with ok_zs(p) as z:
assert list(z) == [b""]
|
py | 1a48e60797df8797917053eb2ed6df1dbf428081 | from os import path, rename, remove
import pytz
import datetime as dt
from flask import current_app
from flask_restful import request, Resource
from flask_apispec import marshal_with
from flask_jwt_extended import jwt_required
from webargs.flaskparser import use_kwargs
from werkzeug.utils import secure_filename
from run4it.api.templates import report_error_and_abort
from run4it.api.profile.auth_helper import get_auth_profile_or_abort
from run4it.app.database import db
from .model import Workout, WorkoutCategory as WorkoutCategoryModel
from .schema import workout_schema, workouts_schema, workout_update_schema, workout_categories_schema
from .gmaps import GeoCodeLookup
def is_valid_workout_filename(filename):
if filename is not None and filename != "":
allowed_extensions = current_app.config["ALLOWED_UPLOAD_EXTENSIONS"]
return "." in filename and filename.rsplit('.', 1)[1].lower() in allowed_extensions
else:
return False
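# Illustrative behaviour (assuming "gpx" is listed in ALLOWED_UPLOAD_EXTENSIONS):
#   is_valid_workout_filename("morning_run.gpx")  -> True
#   is_valid_workout_filename("notes")            -> False  (no extension)
#   is_valid_workout_filename("")                 -> False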
def save_uploaded_file_or_abort(uploaded_file, profile_name):
filename = secure_filename("{0}_{1}".format(profile_name, uploaded_file.filename))
filepath = path.join(current_app.config["GPX_UPLOAD_DIR"], filename)
try:
uploaded_file.save(filepath)
except:
report_error_and_abort(422, "workout", "Workout file could not be read.")
return filepath
def get_autogenerated_workout_name(latitude, longitude, category_name):
try:
print("get_autogenerated_workout_name, {0},{1}".format(latitude, longitude))
geoLookupClient = GeoCodeLookup()
place_name = geoLookupClient.get_name_of_place(latitude, longitude)
print("place_name:", place_name)
if place_name != "":
return "{0} {1}".format(place_name, category_name)
else:
return category_name
except Exception as e:
print(e)
return category_name
def rename_uploaded_file(tmp_filepath, profile_name, workout_id):
# no try/except intentionally here, as we call this within a try block, and want to crash if rename fails :o)
filepath = ""
if is_valid_workout_filename(tmp_filepath):
extension = tmp_filepath.rsplit('.', 1)[1].lower()
filename = "{0}_workout_{1}.{2}".format(profile_name, workout_id, extension)
filepath = path.join(current_app.config["GPX_UPLOAD_DIR"], filename)
rename(tmp_filepath, filepath)
return filepath
def remove_uploaded_file(filepath):
if is_valid_workout_filename(filepath):
try:
remove(filepath)
except:
pass
def add_workout_data_to_goals(profile, workout):
goals = profile.get_active_goals(workout.start_at) # active 'as-we-speak'
if goals is not None:
for goal in goals:
goal.update_from_workout(workout)
def remove_workout_data_from_goals(profile, workout):
goals = profile.get_active_goals(workout.start_at) # active 'as-we-speak'
if goals is not None:
for goal in goals:
goal.remove_from_workout(workout)
class ProfileWorkoutList(Resource):
@jwt_required
@use_kwargs(workout_schema, error_status_code = 422, locations={"query"})
@marshal_with(workouts_schema)
def get(self, username, goal_id=None, limit=10, offset=0):
profile = get_auth_profile_or_abort(username, "workout")
if goal_id is None:
return profile.get_workouts(limit, offset)
goal = profile.get_goal_by_id(goal_id)
if goal is None:
report_error_and_abort(422, "workout", "Goal not found.")
return Workout.get_workouts_for_goal(goal)
@jwt_required
@use_kwargs(workout_update_schema, error_status_code = 422)
@marshal_with(workout_schema)
def post(self, username, name, start_at, distance, duration, category_id, climb=0, edited=False):
profile = get_auth_profile_or_abort(username, "workout")
category = WorkoutCategoryModel.get_by_id(category_id)
if category is None:
report_error_and_abort(422, "workout", "Workout category not found")
if name is None or name == "":
name = category.name
utc_start_at = start_at - start_at.utcoffset()
now = dt.datetime.utcnow().replace(tzinfo=pytz.UTC)
if utc_start_at > now:
report_error_and_abort(422, "workout", "Workout start time is in the future")
try:
new_workout = Workout(profile.id, category, name, utc_start_at, distance, duration, climb, None, edited)
new_workout.save()
add_workout_data_to_goals(profile, new_workout)
except:
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to create workout.")
return new_workout, 200, {'Location': '{}/{}'.format(request.path, new_workout.id)}
class ProfileWorkout(Resource):
@jwt_required
@marshal_with(workout_schema)
def get(self, username, workout_id):
profile = get_auth_profile_or_abort(username, "workout")
workout = profile.get_workout_by_id(workout_id)
if workout is None:
report_error_and_abort(404, "workout", "Workout not found.")
if workout.category.supports_gps_data:
workout.register_extended_data()
return workout
@jwt_required
@use_kwargs(workout_update_schema, error_status_code = 422)
@marshal_with(workout_schema)
def put(self, username, workout_id, name, start_at, distance, duration, category_id, climb=None, edited=None):
profile = get_auth_profile_or_abort(username, "workout")
workout = profile.get_workout_by_id(workout_id)
if workout is None:
report_error_and_abort(422, "workout", "Workout not found")
category = WorkoutCategoryModel.get_by_id(category_id)
if category is None:
report_error_and_abort(422, "workout", "Workout category not found")
if name is None or name == "":
name = category.name
utc_start_at = start_at - start_at.utcoffset()
now = dt.datetime.utcnow().replace(tzinfo=pytz.UTC)
if utc_start_at > now:
report_error_and_abort(422, "workout", "Workout start time is in the future")
# remove data from goal before registering updated
try:
remove_workout_data_from_goals(profile, workout)
except:
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to update workout")
# update category
workout.category = category
workout.name = name
workout.start_at = utc_start_at
workout.distance = distance
workout.duration = duration
if climb is not None:
workout.climb = climb
if edited is not None:
workout.edited = edited
try:
workout.save()
add_workout_data_to_goals(profile, workout)
except:
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to update workout")
return workout, 200
class ProfileWorkoutGpx(Resource): # both TCX and GPX are supported
@jwt_required
@marshal_with(workout_schema)
def post(self, username, category_id):
profile = get_auth_profile_or_abort(username, "workout")
category = WorkoutCategoryModel.get_by_id(category_id)
if category is None:
report_error_and_abort(422, "workout", "Workout category not found")
if request.files is None or len(request.files) != 1 or request.files["gpxfile"] is None:
report_error_and_abort(422, "workout", "Workout file not provided.")
uploaded_file = request.files["gpxfile"]
if not is_valid_workout_filename(uploaded_file.filename):
report_error_and_abort(422, "workout", "Workout filename invalid.")
tmp_filepath = save_uploaded_file_or_abort(uploaded_file, profile.username)
# create object with temporary data and use it to parse workout file
new_workout = Workout(profile.id, category, category.name, dt.datetime.utcnow(), 0, 1, 0, tmp_filepath, False)
new_workout.register_extended_data()
parsed_summary = new_workout.extended_summary
if parsed_summary is None:
remove_uploaded_file(tmp_filepath)
db.session.rollback()
report_error_and_abort(422, "workout", "Failed to parse uploaded file")
new_workout.name = get_autogenerated_workout_name(parsed_summary.latitude, parsed_summary.longitude, new_workout.category_name)
new_workout.start_at = parsed_summary.time
new_workout.duration = parsed_summary.duration
if category.supports_gps_data:
new_workout.distance = parsed_summary.distance
new_workout.climb = parsed_summary.elevation
workout_filepath = None
try:
new_workout.save()
workout_filepath = rename_uploaded_file(tmp_filepath, profile.username, new_workout.id)
new_workout.resource_path = workout_filepath
new_workout.save()
add_workout_data_to_goals(profile, new_workout)
except:
remove_uploaded_file(tmp_filepath)
remove_uploaded_file(workout_filepath)
db.session.rollback()
report_error_and_abort(500, "workout", "Unable to create workout from file.")
return new_workout, 200, {'Location': '{}/{}'.format(request.path, new_workout.id)}
class WorkoutCategoryList(Resource):
@marshal_with(workout_categories_schema)
def get(self):
return WorkoutCategoryModel.query.order_by(WorkoutCategoryModel.name.asc()).all()
|
py | 1a48ea3016159985bd335ff149a9c6f337ebc354 | # -*- coding: future_fstrings -*-
from .. import loader, utils
import logging, asyncio
logger = logging.getLogger(__name__)
def register(cb):
cb(AFKMod())
class AFKMod(loader.Module):
"""Provides a message saying that you are unavailable (out of office)"""
def __init__(self):
self.commands = {"afk":self.afkcmd, "unafk":self.unafkcmd}
self.config = {}
self.name = "AFK"
self._me = None
self._ratelimit = []
async def client_ready(self, client, db):
self._db = db
self._me = await client.get_me()
async def afkcmd(self, message):
""".afk [message]
If no message is provided, 'I'm AFK' will be used as default"""
if utils.get_args_raw(message):
self._db.set(__name__, "afk", utils.get_args_raw(message))
else:
self._db.set(__name__, "afk", True)
await message.edit("<code>I'm AFK</code>")
async def unafkcmd(self, message):
"""Remove the AFK status"""
self._ratelimit.clear()
self._db.set(__name__, "afk", False)
await message.edit("<code>I'm no longer AFK</code>")
async def watcher(self, message):
if message.mentioned or getattr(message.to_id, 'user_id', None) == self._me.id:
logger.debug("tagged!")
if message.from_id in self._ratelimit:
self._ratelimit.remove(message.from_id)
return
else:
self._ratelimit += [message.from_id]
user = await utils.get_user(message)
if user.is_self or user.bot or user.verified:
logger.debug("User is self, bot or verified.")
return
if self.get_afk() == True:
await message.reply("<code>I'm AFK!</code>")
elif self.get_afk() != False:
await message.reply(f"<code>{utils.escape_html(self.get_afk())}</code>")
def get_afk(self):
return self._db.get(__name__, "afk", False)
|
py | 1a48ea6f533686a4e7076d2a6edfcb6e3920ceaa | from datetime import datetime
from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import redirect
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///books.db'
db = SQLAlchemy(app)
class Publisher(db.Model):
pub_id = db.Column(db.Integer, primary_key=True)
pub_name = db.Column(db.String(20))
address = db.Column(db.String(20))
class Book(db.Model):
book_id = db.Column(db.Integer, primary_key = True)
book_name = db.Column(db.String(60))
author = db.Column(db.String(20))
# available
pub_id = db.Column(db.Integer, db.ForeignKey(Publisher.pub_id))
def __repr__(self):
return '<Books %r>' % self.book_id
class Member(db.Model):
mem_id = db.Column(db.Integer, primary_key=True)
mem_name = db.Column(db.String(20))
# join_date = db.Column(db.DateTime, default = datetime.utcnow)
Borrowed = db.Table('borrowed',
db.Column('book_id', db.Integer, db.ForeignKey(Book.book_id)),
db.Column('issue_date', db.DateTime, default = datetime.utcnow)
# due_date
)
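# One-time setup sketch (illustrative; assumes this module is importable, e.g. as `app`):
#   >>> from app import db
#   >>> db.create_all()  # creates books.db with the Publisher, Book, Member and borrowed tables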
@app.route('/', methods = ['POST', 'GET'])
def index():
return render_template('index.html')
@app.route('/collection', methods = ['POST', 'GET'])
def collection():
books = Book.query.order_by(Book.book_id).all()
return render_template('collection.html', books = books)
@app.route('/addbook', methods = ['POST', 'GET'])
def addbook():
if request.method == 'POST':
b_name = request.form['book_name']
a_name = request.form['author']
new_book = Book(book_name = b_name, author = a_name)
try:
db.session.add(new_book)
db.session.commit()
return redirect('/')
except:
return 'There was an error adding your book'
else:
books = Book.query.order_by(Book.book_id).all()
return render_template('addbook.html', books = books)
@app.route('/returnbook', methods = ['POST', 'GET'])
def returnbook():
if request.method == 'POST':
b_name = request.form['book_name']
a_name = request.form['author']
new_book = Book(book_name = b_name, author = a_name)
try:
db.session.add(new_book)
db.session.commit()
return redirect('/')
except:
return 'There was an error adding your book'
else:
books = Book.query.order_by(Book.book_id).all()
return render_template('returnbook.html', books = books)
@app.route('/member', methods = ['POST', 'GET'])
def member():
if request.method == 'POST':
        m_name = request.form['mem_name']  # field name assumed to match the Member.mem_name column
new_mem = Member(mem_name = m_name)
try:
db.session.add(new_mem)
db.session.commit()
return redirect('/')
except:
return 'There was an error adding the member'
else:
return render_template('member.html')
@app.route('/borrow', methods = ['POST', 'GET'])
def borrow():
books = Book.query.order_by(Book.book_id).all()
return render_template('borrow.html', books = books)
@app.route('/delete/<int:book_id>')
def delete(book_id):
book_to_delete = Book.query.get_or_404(book_id)
try:
db.session.delete(book_to_delete)
db.session.commit()
return redirect('/borrow')
except:
return 'There was a problem borrowing that book'
if __name__ == "__main__":
app.run(debug=True) |
py | 1a48eab8cc8686dfe8cf3434c45ecc8cdf086e5f | from src.ArchiveUtility import ArchiveUtility
from src.Downloader import Downloader
from src.FileOrganizer import FileOrganizer
from src.Utils import Utils
def handle_archival():
print("Would you like to use an existing CSV file in the archival of files?")
with_csv = Utils.get_string_input("Yes or no: ", ["YES", "NO"])
directory, csv_path, column_number, destination_number, force, asset_prefix, source_name, directory_prefix = [
None] * 8
if with_csv.upper() == "YES":
csv_path = Utils.get_string_input("Enter the path to your CSV (i.e 'yourdirectory/yourcsv.csv'): ")
column_number = Utils.get_string_input("Enter the column number for the file name's original name: ")
destination_number = Utils.get_string_input("Enter the column number for the file name's expected new name: ")
force = Utils.get_number_input(2,
"Would you like to change the existing files' names or copy them into a new zipped "
"directory? "
"\n1. Existing File Names\n2. Copy It\nEnter Number:")
if force == 1:
force = True
else:
force = False
elif with_csv.upper() == "NO":
asset_prefix = input("Enter the asset prefix to append to the each renamed file (press enter to have none): ")
source_name = input("Enter the source name (press enter to have none):")
directory_prefix = input("Enter the prefix for your altered directories (i.e __archive) (press enter to have "
"none): ")
directory = Utils.get_string_input(
"Enter the path to the directory containing all of the files you want to alter: ")
input("Hit enter when you are ready to run.")
ArchiveUtility(directory, asset_prefix, source_name, directory_prefix, csv_path, column_number,
destination_number, force).run()
def handle_file_organizer():
organization_types = ["Dimension"]
print("How would you like to organize your files?")
for i, o_type in enumerate(organization_types):
print(str(i + 1) + ":" + o_type)
num = Utils.get_number_input(len(organization_types), "Enter Number: ")
selected_type = organization_types[num - 1]
src = Utils.get_string_input("Enter the source directory for all of your files you want to organize: ")
dest = Utils.get_string_input("Enter the destination directory for all of the files you want to organize: ")
run_forever = Utils.get_string_input("Would you like to run this continuously? (Yes or no): ", ["YES", "NO"])
if run_forever.upper() == "YES":
run_forever = True
else:
run_forever = False
input("Hit enter when you are ready to run.")
FileOrganizer(src, dest, selected_type).organize(run_forever)
def handle_dropbox_download():
directory = input("Enter the directory path for your files (i.e /SS 01 FAMILY/TESTIMONIALS/200702): ")
destination = input("Enter the path to the folder that you want the downloaded files in: ")
api_key = input("Enter the API Key needed to access this account: ")
input("Hit enter when you are ready to run.")
Downloader(directory, destination, api_key).run()
def handle_dropbox_file_name_retrieval():
directory = input("Enter the directory path for your files (i.e /SS 01 FAMILY/TESTIMONIALS/200702): ")
api_key = input("Enter the API Key needed to access this account: ")
input("Hit enter when you are ready to run.")
Downloader(directory, None, api_key).get_files()
def run():
modes = {"Archival": handle_archival, "File Organize": handle_file_organizer, "Download":
handle_dropbox_download, "Retrieve File Names": handle_dropbox_file_name_retrieval}
print("Welcome to the Media Utility Tool")
print("What would you like to do?")
for i, mode in enumerate(modes.keys()):
print(str(i + 1) + ":" + mode)
choice = Utils.get_number_input(len(modes), "Enter number: ")
print("You selected: " + str(choice))
mode = modes[list(modes.keys())[choice - 1]]
mode()
if __name__ == '__main__':
run()
|
py | 1a48ebb995b3ddd2326cca83b2dbf5131e01918a | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import six
from fuel_mirror.common import utils
from fuel_mirror.tests import base
class DictAsObj(object):
def __init__(self, d):
self.__dict__.update(d)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class TestUtils(base.TestCase):
def test_lists_merge(self):
main = [{"a": 1, "b": 2, "c": 0}, {"a": 2, "b": 3, "c": 1}]
patch = [{"a": 2, "b": 4}, {"a": 3, "b": 5}]
utils.lists_merge(
main,
patch,
key="a"
)
self.assertItemsEqual(
[{"a": 1, "b": 2, "c": 0},
{"a": 2, "b": 4, "c": 1},
{"a": 3, "b": 5}],
main
)
def test_first(self):
self.assertEqual(
1,
utils.first(0, 1, 0),
)
self.assertEqual(
1,
utils.first(None, [], '', 1),
)
self.assertIsNone(
utils.first(None, [], 0, ''),
)
self.assertIsNone(
utils.first(),
)
def test_is_subdict(self):
self.assertFalse(utils.is_subdict({"c": 1}, {"a": 1, "b": 1}))
self.assertFalse(utils.is_subdict({"a": 1, "b": 2}, {"a": 1, "b": 1}))
self.assertFalse(
utils.is_subdict({"a": 1, "b": 1, "c": 2}, {"a": 1, "b": 1})
)
self.assertFalse(
utils.is_subdict({"a": 1, "b": None}, {"a": 1})
)
self.assertTrue(utils.is_subdict({}, {"a": 1}))
self.assertTrue(utils.is_subdict({"a": 1}, {"a": 1, "b": 1}))
self.assertTrue(utils.is_subdict({"a": 1, "b": 1}, {"a": 1, "b": 1}))
@mock.patch("fuel_mirror.common.utils.open")
def test_get_fuel_settings(self, m_open):
m_open().__enter__.side_effect = [
six.StringIO(
'ADMIN_NETWORK:\n'
' ipaddress: "10.20.0.4"\n'
'FUEL_ACCESS:\n'
' user: "test"\n'
' password: "test_pwd"\n',
),
OSError
]
self.assertEqual(
{
"server": "10.20.0.4",
"user": "test",
"password": "test_pwd",
},
utils.get_fuel_settings()
)
self.assertEqual(
{},
utils.get_fuel_settings()
)
@mock.patch("fuel_mirror.common.utils.yaml")
@mock.patch("fuel_mirror.common.utils.open")
def test_load_input_data(self, open_mock, yaml_mock):
data = "$param1: $param2"
open_mock().__enter__().read.return_value = data
v = utils.load_input_data("data.yaml", param1="key", param2="value")
open_mock.assert_called_with("data.yaml", "r")
yaml_mock.load.assert_called_once_with("key: value")
self.assertIs(yaml_mock.load(), v)
|
py | 1a48ecc6d751700ea5ff151c58f07adae0779930 | #
# This file is part of EPIMODELS
# (https://github.com/I-Bouros/multi-epi-model-cross-analysis.git) which is
# released under the MIT license. See accompanying LICENSE for copyright
# notice and full license details.
#
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.testing as pdt
import epimodels as em
class TestContactMatrixClass(unittest.TestCase):
"""
Test the 'ContactMatrix' class.
"""
def test__init__(self):
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
c = em.ContactMatrix(age_groups, data_matrix)
self.assertEqual(c._num_a_groups, 2)
npt.assert_array_equal(c.ages, np.array(['0-10', '10-25']))
pdt.assert_frame_equal(
c.contact_matrix,
pd.DataFrame(
data=np.array([[10, 5.2], [0, 3]]),
index=['0-10', '10-25'],
columns=['0-10', '10-25']))
with self.assertRaises(ValueError):
em.ContactMatrix('0', data_matrix)
with self.assertRaises(TypeError):
em.ContactMatrix([0, '1'], data_matrix)
with self.assertRaises(TypeError):
em.ContactMatrix(['0', 1], data_matrix)
with self.assertRaises(ValueError):
em.ContactMatrix(age_groups, [1])
with self.assertRaises(ValueError):
em.ContactMatrix(age_groups, np.array([[10, 5, 0], [0, 0, 3]]))
with self.assertRaises(ValueError):
em.ContactMatrix(age_groups, np.array([[10, 5], [0, 3], [0, 0]]))
with self.assertRaises(TypeError):
em.ContactMatrix(age_groups, np.array([[10, 5], [0, '3']]))
def test_get_age_groups(self):
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
c = em.ContactMatrix(age_groups, data_matrix)
self.assertEqual(
c.get_age_groups(),
"Polpulation is split into 2 age groups: ['0-10', '10-25'].")
def test_change_age_groups(self):
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
c = em.ContactMatrix(age_groups, data_matrix)
new_age_groups = ['0-15', '15-25']
c.change_age_groups(new_age_groups)
self.assertEqual(c._num_a_groups, 2)
npt.assert_array_equal(c.ages, np.array(['0-15', '15-25']))
pdt.assert_frame_equal(
c.contact_matrix,
pd.DataFrame(
data=np.array([[10, 5.2], [0, 3]]),
index=['0-15', '15-25'],
columns=['0-15', '15-25']))
with self.assertRaises(ValueError):
c.change_age_groups(['0-15', '15-25', '25+'])
def test_plot_heat_map(self):
with patch('plotly.graph_objs.Figure.show') as show_patch:
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
c = em.ContactMatrix(age_groups, data_matrix)
c.plot_heat_map()
# Assert show_figure is called once
assert show_patch.called
class TestRegionMatrixClass(unittest.TestCase):
"""
Test the 'RegionMatrix' class.
"""
def test__init__(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
r = em.RegionMatrix(region_name, age_groups, data_matrix)
self.assertEqual(r.region, 'London')
self.assertEqual(r.num_a_groups, 2)
npt.assert_array_equal(r.ages, np.array(['0-10', '10-25']))
pdt.assert_frame_equal(
r.region_matrix,
pd.DataFrame(
data=np.array([[10, 5.2], [0, 3]]),
index=['0-10', '10-25'],
columns=['0-10', '10-25']))
with self.assertRaises(ValueError):
em.RegionMatrix(region_name, '0', data_matrix)
with self.assertRaises(TypeError):
em.RegionMatrix(region_name, [0, '1'], data_matrix)
with self.assertRaises(TypeError):
em.RegionMatrix(region_name, ['0', 1], data_matrix)
with self.assertRaises(ValueError):
em.RegionMatrix(region_name, age_groups, [1])
with self.assertRaises(ValueError):
em.RegionMatrix(
region_name, age_groups, np.array([[10, 5, 0], [0, 0, 3]]))
with self.assertRaises(ValueError):
em.RegionMatrix(
region_name, age_groups, np.array([[10, 5], [0, 3], [0, 0]]))
with self.assertRaises(TypeError):
em.RegionMatrix(
region_name, age_groups, np.array([[10, 5], [0, '3']]))
with self.assertRaises(TypeError):
em.RegionMatrix([0], age_groups, data_matrix)
def test_change_region_name(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
r = em.RegionMatrix(region_name, age_groups, data_matrix)
new_region_name = 'Oxford'
r.change_region_name(new_region_name)
self.assertEqual(r.region, 'Oxford')
with self.assertRaises(TypeError):
r.change_region_name(0)
def test_change_age_groups(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
r = em.RegionMatrix(region_name, age_groups, data_matrix)
new_age_groups = ['0-15', '15-25']
r.change_age_groups(new_age_groups)
self.assertEqual(r.num_a_groups, 2)
npt.assert_array_equal(r.ages, np.asarray(['0-15', '15-25']))
pdt.assert_frame_equal(
r.region_matrix,
pd.DataFrame(
data=np.array([[10, 5.2], [0, 3]]),
index=['0-15', '15-25'],
columns=['0-15', '15-25']))
with self.assertRaises(ValueError):
r.change_age_groups(['0-15', '15-25', '25+'])
def test_plot_heat_map(self):
with patch('plotly.graph_objs.Figure.show') as show_patch:
region_name = 'London'
age_groups = ['0-10', '10-25']
data_matrix = np.array([[10, 5.2], [0, 3]])
r = em.RegionMatrix(region_name, age_groups, data_matrix)
r.plot_heat_map()
# Assert show_figure is called once
assert show_patch.called
class TestUniNextGenMatrixClass(unittest.TestCase):
"""
Test the 'UniNextGenMatrix' class.
"""
def test__init__(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
contact_data_matrix = np.array([[10, 5.2], [0, 3]])
region_data_matrix = np.array([[0.5, 1.2], [0.29, 6]])
pop_size = [18, 2]
dI = 4
contacts = em.ContactMatrix(age_groups, contact_data_matrix)
regional = em.RegionMatrix(region_name, age_groups, region_data_matrix)
next_gen = em.UniNextGenMatrix(pop_size, contacts, regional, dI)
self.assertEqual(next_gen.region, 'London')
npt.assert_array_equal(next_gen.ages, np.array(['0-10', '10-25']))
npt.assert_array_equal(next_gen.susceptibles, np.array([18, 2]))
npt.assert_array_equal(next_gen.contacts, contact_data_matrix)
npt.assert_array_equal(next_gen.regional_suscep, region_data_matrix)
self.assertEqual(next_gen.infection_period, 4)
pdt.assert_frame_equal(
next_gen.next_gen_matrix,
pd.DataFrame(
data=np.array([[360, 449.28], [0, 144]]),
index=['0-10', '10-25'],
columns=['0-10', '10-25']))
with self.assertRaises(TypeError):
em.UniNextGenMatrix(pop_size, 0, regional, dI)
with self.assertRaises(TypeError):
em.UniNextGenMatrix(pop_size, contacts, 0, dI)
with self.assertRaises(ValueError):
new_age_groups = ['0-15', '15-25']
regional1 = em.RegionMatrix(
region_name, new_age_groups, region_data_matrix)
em.UniNextGenMatrix(pop_size, contacts, regional1, dI)
with self.assertRaises(TypeError):
em.UniNextGenMatrix(pop_size, contacts, regional, '4')
with self.assertRaises(ValueError):
em.UniNextGenMatrix(pop_size, contacts, regional, 0)
with self.assertRaises(ValueError):
em.UniNextGenMatrix([[1], [2]], contacts, regional, dI)
with self.assertRaises(ValueError):
em.UniNextGenMatrix([0, 1, 1], contacts, regional, dI)
with self.assertRaises(ValueError):
em.UniNextGenMatrix([0, -1], contacts, regional, dI)
with self.assertRaises(TypeError):
em.UniNextGenMatrix([0, '1'], contacts, regional, dI)
def test_compute_dom_eigenvalue(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
contact_data_matrix = np.array([[10, 0], [0, 3]])
region_data_matrix = np.array([[0.5, 0], [0, 6]])
pop_size = [1, 2]
dI = 4
contacts = em.ContactMatrix(age_groups, contact_data_matrix)
regional = em.RegionMatrix(region_name, age_groups, region_data_matrix)
next_gen = em.UniNextGenMatrix(pop_size, contacts, regional, dI)
self.assertEqual(next_gen.compute_dom_eigenvalue(), 144)
class TestUniInfectivityMatrixClass(unittest.TestCase):
"""
Test the 'UniInfectivityMatrix' class.
"""
def test__init__(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
# Initial state of the system
contact_data_matrix_0 = np.array([[10, 0], [0, 3]])
region_data_matrix_0 = np.array([[0.5, 0], [0, 6]])
init_pop_size = [1, 2]
dI = 4
contacts_0 = em.ContactMatrix(age_groups, contact_data_matrix_0)
regional_0 = em.RegionMatrix(
region_name, age_groups, region_data_matrix_0)
next_gen_0 = em.UniNextGenMatrix(
init_pop_size, contacts_0, regional_0, dI)
initial_r = 0.5
infect = em.UniInfectivityMatrix(
initial_r,
initial_nextgen_matrix=next_gen_0)
self.assertEqual(infect.r0, 0.5)
self.assertEqual(infect.r0_star, 144)
with self.assertRaises(TypeError):
em.UniInfectivityMatrix(
'0',
initial_nextgen_matrix=next_gen_0)
with self.assertRaises(TypeError):
em.UniInfectivityMatrix(
initial_r,
initial_nextgen_matrix=0)
def test_compute_prob_infectivity_matrix(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
# Initial state of the system
contact_data_matrix_0 = np.array([[10, 0], [0, 3]])
region_data_matrix_0 = np.array([[0.5, 0], [0, 6]])
init_pop_size = [1, 2]
dI = 4
contacts_0 = em.ContactMatrix(age_groups, contact_data_matrix_0)
regional_0 = em.RegionMatrix(
region_name, age_groups, region_data_matrix_0)
next_gen_0 = em.UniNextGenMatrix(
init_pop_size, contacts_0, regional_0, dI)
# Later time state of the system
contact_data_matrix_1 = np.array([[10, 5.2], [0, 3]])
region_data_matrix_1 = np.array([[0.5, 1.2], [0.29, 6]])
current_pop_size = [18, 2]
contacts_1 = em.ContactMatrix(age_groups, contact_data_matrix_1)
regional_1 = em.RegionMatrix(
region_name, age_groups, region_data_matrix_1)
next_gen_1 = em.UniNextGenMatrix(
current_pop_size, contacts_1, regional_1, dI)
initial_r = 0.5
temp_variation = 1
infect = em.UniInfectivityMatrix(
initial_r,
initial_nextgen_matrix=next_gen_0)
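        # The expected matrix below appears to equal
        # temp_variation * r0 / r0_star * C_ij * M_ij with r0_star = 144,
        # e.g. 1 * 0.5 / 144 * 10 * 0.5 = 5/288 (inferred from the test data).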
npt.assert_array_equal(
infect.compute_prob_infectivity_matrix(temp_variation, next_gen_1),
np.array([[5/288, 13/600], [0, 1/16]]))
with self.assertRaises(TypeError):
infect.compute_prob_infectivity_matrix('1', next_gen_1)
with self.assertRaises(TypeError):
infect.compute_prob_infectivity_matrix(temp_variation, 0)
def test_compute_reproduction_number(self):
region_name = 'London'
age_groups = ['0-10', '10-25']
# Initial state of the system
contact_data_matrix_0 = np.array([[10, 0], [0, 3]])
region_data_matrix_0 = np.array([[0.5, 0], [0, 6]])
init_pop_size = [1, 2]
dI = 4
contacts_0 = em.ContactMatrix(age_groups, contact_data_matrix_0)
regional_0 = em.RegionMatrix(
region_name, age_groups, region_data_matrix_0)
next_gen_0 = em.UniNextGenMatrix(
init_pop_size, contacts_0, regional_0, dI)
# Later time state of the system
contact_data_matrix_1 = np.array([[10, 5.2], [0, 3]])
region_data_matrix_1 = np.array([[0.5, 1.2], [0.29, 6]])
current_pop_size = [18, 2]
contacts_1 = em.ContactMatrix(age_groups, contact_data_matrix_1)
regional_1 = em.RegionMatrix(
region_name, age_groups, region_data_matrix_1)
next_gen_1 = em.UniNextGenMatrix(
current_pop_size, contacts_1, regional_1, dI)
initial_r = 0.5
temp_variation = 1
infect = em.UniInfectivityMatrix(
initial_r,
initial_nextgen_matrix=next_gen_0)
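        # 5/4 appears to be the dominant eigenvalue of the rescaled matrix
        # S_i * dI * prob_ij = [[5/4, 39/25], [0, 1/2]] built from the later
        # state of the system (inferred from the test data).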
self.assertEqual(
infect.compute_reproduction_number(
temp_variation, next_gen_1), 5/4)
with self.assertRaises(TypeError):
infect.compute_reproduction_number('1', next_gen_1)
with self.assertRaises(TypeError):
infect.compute_reproduction_number(temp_variation, 0)
class TestMultiTimesInfectivityClass(unittest.TestCase):
"""
Test the 'MultiTimesInfectivity' class.
"""
def test__init__(self):
regions = ['London', 'Cornwall']
age_groups = ['0-10', '10-25']
# Initial state of the system
contact_data_matrix_0 = np.array([[10, 0], [0, 3]])
contact_data_matrix_1 = np.array([[10, 5.2], [0, 3]])
region_data_matrix_0_0 = np.array([[0.5, 0], [0, 6]])
region_data_matrix_0_1 = np.array([[1, 10], [1, 0]])
region_data_matrix_1_0 = np.array([[0.5, 1.2], [0.29, 6]])
region_data_matrix_1_1 = np.array([[0.85, 1], [0.9, 6]])
susceptibles = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[0, 2], [1, 1]]]
dI = 4
contacts_0 = em.ContactMatrix(age_groups, contact_data_matrix_0)
contacts_1 = em.ContactMatrix(age_groups, contact_data_matrix_1)
regional_0_0 = em.RegionMatrix(
regions[0], age_groups, region_data_matrix_0_0)
regional_0_1 = em.RegionMatrix(
regions[1], age_groups, region_data_matrix_0_1)
regional_1_0 = em.RegionMatrix(
regions[0], age_groups, region_data_matrix_1_0)
regional_1_1 = em.RegionMatrix(
regions[1], age_groups, region_data_matrix_1_1)
        # Contact matrices
matrices_contact = [contacts_0, contacts_1]
time_changes_contact = [1, 3]
matrices_region = [
[regional_0_0, regional_0_1],
[regional_1_0, regional_1_1]]
time_changes_region = [1, 2]
initial_r = [0.5, 1]
m = em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
self.assertEqual(m._regions, ['London', 'Cornwall'])
npt.assert_array_equal(m.initial_r, np.array([0.5, 1]))
self.assertEqual(m.dI, 4)
npt.assert_array_equal(m.times_contact, np.array([1, 3]))
npt.assert_array_equal(m.times_region, np.array([1, 2]))
self.assertCountEqual(m.contact_matrices, matrices_contact)
self.assertCountEqual(m.region_matrices, matrices_region)
self.assertEqual(len(m.initial_infec_matrices), 2)
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
0,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(TypeError):
em.MultiTimesInfectivity(
[contacts_0, 0],
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
1,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
[1],
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(TypeError):
em.MultiTimesInfectivity(
matrices_contact,
[1, 1.5],
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
[0, 1],
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
'London',
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(TypeError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
[0, 'London'],
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
[regional_1_0, regional_1_1],
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
matrices_region_1 = [[regional_0_0], [regional_1_0]]
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region_1,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(TypeError):
matrices_region_1 = [
[regional_0_0, 1],
[regional_1_0, regional_1_1]
]
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region_1,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
matrices_region_1 = [
[regional_0_0, regional_0_1],
[
regional_1_0,
em.RegionMatrix(
regions[0],
age_groups,
region_data_matrix_1_1)]
]
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region_1,
time_changes_region,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
1,
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
[1, 2, 3],
initial_r,
dI,
susceptibles[0])
with self.assertRaises(TypeError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
[1, '2'],
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
[0, 2],
initial_r,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
0.5,
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
[0.5],
dI,
susceptibles[0])
with self.assertRaises(TypeError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
[0.5, '1'],
dI,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
[0.5, 0],
dI,
susceptibles[0])
with self.assertRaises(TypeError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
'4',
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
0,
susceptibles[0])
with self.assertRaises(ValueError):
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
[1])
with self.assertRaises(ValueError):
susceptibles_1 = [[[1], [3]], [[5], [7]], [[0], [1]]]
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles_1[0])
with self.assertRaises(ValueError):
susceptibles_1 = [[[1, 2]], [[5, 6]], [[0, 2]]]
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles_1[0])
with self.assertRaises(TypeError):
susceptibles_1 = [
[[1, '2'], [3, 4]], [[5, 6], [7, 8]], [[0, 2], [1, 1]]]
em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles_1[0])
def test_compute_prob_infectivity_matrix(self):
regions = ['London', 'Cornwall']
age_groups = ['0-10', '10-25']
# Initial state of the system
contact_data_matrix_0 = np.array([[10, 0], [0, 3]])
contact_data_matrix_1 = np.array([[10, 5.2], [0, 3]])
region_data_matrix_0_0 = np.array([[0.5, 0], [0, 6]])
region_data_matrix_0_1 = np.array([[1, 10], [1, 0]])
region_data_matrix_1_0 = np.array([[0.5, 1.2], [0.29, 6]])
region_data_matrix_1_1 = np.array([[0.85, 1], [0.9, 6]])
susceptibles = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[0, 2], [1, 1]]]
dI = 4
contacts_0 = em.ContactMatrix(age_groups, contact_data_matrix_0)
contacts_1 = em.ContactMatrix(age_groups, contact_data_matrix_1)
regional_0_0 = em.RegionMatrix(
regions[0], age_groups, region_data_matrix_0_0)
regional_0_1 = em.RegionMatrix(
regions[1], age_groups, region_data_matrix_0_1)
regional_1_0 = em.RegionMatrix(
regions[0], age_groups, region_data_matrix_1_0)
regional_1_1 = em.RegionMatrix(
regions[1], age_groups, region_data_matrix_1_1)
        # Contact matrices
matrices_contact = [contacts_0, contacts_1]
time_changes_contact = [1, 3]
matrices_region = [
[regional_0_0, regional_0_1],
[regional_1_0, regional_1_1]]
time_changes_region = [1, 2]
initial_r = [0.5, 1]
m = em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
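        # For region 1 ('London') at time 3 these inputs reduce to the same
        # single-region case covered in TestUniInfectivityMatrixClass, hence
        # the identical expected matrix (inferred from the shared test data).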
npt.assert_array_equal(
m.compute_prob_infectivity_matrix(1, 3, susceptibles[2][0], 1),
np.array([[5/288, 13/600], [0, 1/16]]))
with self.assertRaises(TypeError):
m.compute_prob_infectivity_matrix('1', 3, susceptibles[2][0], 1)
with self.assertRaises(ValueError):
m.compute_prob_infectivity_matrix(3, 3, susceptibles[2][0], 1)
with self.assertRaises(ValueError):
m.compute_prob_infectivity_matrix(0, 3, susceptibles[2][0], 1)
with self.assertRaises(TypeError):
m.compute_prob_infectivity_matrix(1, '3', susceptibles[2][0], 1)
with self.assertRaises(ValueError):
m.compute_prob_infectivity_matrix(1, 0, susceptibles[2][0], 1)
with self.assertRaises(TypeError):
m.compute_prob_infectivity_matrix(1, 3, susceptibles[2][0], '1')
with self.assertRaises(ValueError):
m.compute_prob_infectivity_matrix(1, 3, [[5, 6], [7, 8]], 1)
with self.assertRaises(ValueError):
m.compute_prob_infectivity_matrix(1, 3, [5, 6, 0], 1)
with self.assertRaises(TypeError):
m.compute_prob_infectivity_matrix(1, 3, [5, '6'], 1)
with self.assertRaises(ValueError):
m.compute_prob_infectivity_matrix(1, 3, [5, -6], 1)
def test_compute_reproduction_number(self):
regions = ['London', 'Cornwall']
age_groups = ['0-10', '10-25']
# Initial state of the system
contact_data_matrix_0 = np.array([[10, 0], [0, 3]])
contact_data_matrix_1 = np.array([[10, 5.2], [0, 3]])
region_data_matrix_0_0 = np.array([[0.5, 0], [0, 6]])
region_data_matrix_0_1 = np.array([[1, 10], [1, 0]])
region_data_matrix_1_0 = np.array([[0.5, 1.2], [0.29, 6]])
region_data_matrix_1_1 = np.array([[0.85, 1], [0.9, 6]])
susceptibles = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[0, 2], [1, 1]]]
dI = 4
contacts_0 = em.ContactMatrix(age_groups, contact_data_matrix_0)
contacts_1 = em.ContactMatrix(age_groups, contact_data_matrix_1)
regional_0_0 = em.RegionMatrix(
regions[0], age_groups, region_data_matrix_0_0)
regional_0_1 = em.RegionMatrix(
regions[1], age_groups, region_data_matrix_0_1)
regional_1_0 = em.RegionMatrix(
regions[0], age_groups, region_data_matrix_1_0)
regional_1_1 = em.RegionMatrix(
regions[1], age_groups, region_data_matrix_1_1)
        # Contact matrices
matrices_contact = [contacts_0, contacts_1]
time_changes_contact = [1, 3]
matrices_region = [
[regional_0_0, regional_0_1],
[regional_1_0, regional_1_1]]
time_changes_region = [1, 2]
initial_r = [0.5, 1]
m = em.MultiTimesInfectivity(
matrices_contact,
time_changes_contact,
regions,
matrices_region,
time_changes_region,
initial_r,
dI,
susceptibles[0])
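        # With susceptibles [0, 2] only the second age group contributes, so
        # the reproduction number appears to be 2 * 4 * 1/16 = 0.5
        # (inferred from the test data).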
self.assertEqual(m.compute_reproduction_number(
1, 3, susceptibles[2][0]), 0.5)
with self.assertRaises(TypeError):
m.compute_reproduction_number('1', 3, susceptibles[2][0], 1)
with self.assertRaises(ValueError):
m.compute_reproduction_number(3, 3, susceptibles[2][0], 1)
with self.assertRaises(ValueError):
m.compute_reproduction_number(0, 3, susceptibles[2][0], 1)
with self.assertRaises(TypeError):
m.compute_reproduction_number(1, '3', susceptibles[2][0], 1)
with self.assertRaises(ValueError):
m.compute_reproduction_number(1, 0, susceptibles[2][0], 1)
with self.assertRaises(TypeError):
m.compute_reproduction_number(1, 3, susceptibles[2][0], '1')
|
py | 1a48ef3ae6fc02340fb9a9c4fd4c621c4a5dd27c | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 14:50:35 2018
@author: Ashutosh Verma
"""
'''
Write a program that calculates and prints the value according to the given formula:
Q = Square root of [(2 * C * D)/H]
Following are the fixed values of C and H:
C is 50. H is 30.
D is the variable whose values should be input to your program in a comma-separated sequence.
Example
Let us assume the following comma separated input sequence is given to the program:
100,150,180
The output of the program should be:
18,22,24
'''
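# Worked check of the formula using the sample above: for D = 100,
# Q = sqrt((2 * 50 * 100) / 30) = sqrt(333.33...), which rounds to 18 and
# matches the first value of the expected output.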
import math
# Fixed constants from the problem statement.
c = 50
h = 30
value = []
# Read the comma-separated D values from standard input.
items = [x for x in input().split(',')]
for d in items:
    # Q = sqrt((2 * C * D) / H), rounded to the nearest integer.
    value.append(str(int(round(math.sqrt(2 * c * float(d) / h)))))
print (','.join(value)) |
py | 1a48ef3f8de0abda7867c55712f8298247ec1034 | #!/bin/env python
# pylint: disable=E1101, W0201, E1103
# E1101: reference config file variables
# W0201: Don't much around with __init__
# E1103: Use thread members
from __future__ import print_function
from builtins import range, object
import cProfile
import os
import pickle
import pstats
import random
import threading
import time
import unittest
from Utils.PythonVersion import PY3
from WMCore_t.WMSpec_t.TestSpec import testWorkload
from nose.plugins.attrib import attr
from WMComponent.JobCreator.JobCreatorPoller import JobCreatorPoller, capResourceEstimates
from WMCore.Agent.HeartbeatAPI import HeartbeatAPI
from WMCore.DAOFactory import DAOFactory
from WMCore.DataStructs.Run import Run
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.Services.UUIDLib import makeUUID
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.WMSpec.Makers.TaskMaker import TaskMaker
from WMQuality.Emulators import EmulatorSetup
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
from WMQuality.TestInitCouchApp import TestInitCouchApp as TestInit
class JobCreatorTest(EmulatedUnitTestCase):
"""
Test case for the JobCreator
"""
sites = ['T2_US_Florida', 'T2_US_UCSD', 'T2_TW_Taiwan', 'T1_CH_CERN']
def setUp(self):
"""
_setUp_
Setup the database and logging connection. Try to create all of the
WMBS tables. Also, create some dummy locations.
"""
super(JobCreatorTest, self).setUp()
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setSchema(customModules=['WMCore.WMBS', 'WMCore.ResourceControl', 'WMCore.Agent.Database'],
useDefault=False)
self.couchdbname = "jobcreator_t"
self.testInit.setupCouch("%s/jobs" % self.couchdbname, "JobDump")
self.testInit.setupCouch("%s/fwjrs" % self.couchdbname, "FWJRDump")
self.configFile = EmulatorSetup.setupWMAgentConfig()
myThread = threading.currentThread()
self.daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
locationAction = self.daoFactory(classname="Locations.New")
for site in self.sites:
locationAction.execute(siteName=site, pnn=site)
# Create sites in resourceControl
resourceControl = ResourceControl()
for site in self.sites:
resourceControl.insertSite(siteName=site, pnn=site, ceName=site)
resourceControl.insertThreshold(siteName=site, taskType='Processing', maxSlots=10000, pendingSlots=10000)
self.resourceControl = resourceControl
self._setup = True
self._teardown = False
self.testDir = self.testInit.generateWorkDir()
self.cwd = os.getcwd()
# Set heartbeat
self.componentName = 'JobCreator'
self.heartbeatAPI = HeartbeatAPI(self.componentName)
self.heartbeatAPI.registerComponent()
if PY3:
self.assertItemsEqual = self.assertCountEqual
return
def tearDown(self):
"""
_tearDown_
Drop all the WMBS tables.
"""
self.testInit.clearDatabase(modules=['WMCore.WMBS', 'WMCore.ResourceControl', 'WMCore.Agent.Database'])
self.testInit.delWorkDir()
self._teardown = True
self.testInit.tearDownCouch()
EmulatorSetup.deleteConfig(self.configFile)
return
def createJobCollection(self, name, nSubs, nFiles, workflowURL='test'):
"""
_createJobCollection_
Create a collection of jobs
"""
myThread = threading.currentThread()
testWorkflow = Workflow(spec=workflowURL, owner="mnorman",
name=name, task="/TestWorkload/ReReco")
testWorkflow.create()
for sub in range(nSubs):
nameStr = '%s-%i' % (name, sub)
myThread.transaction.begin()
testFileset = Fileset(name=nameStr)
testFileset.create()
for f in range(nFiles):
# pick a random site
site = random.choice(self.sites)
testFile = File(lfn="/lfn/%s/%i" % (nameStr, f), size=1024, events=10)
testFile.setLocation(site)
testFile.create()
testFileset.addFile(testFile)
testFileset.commit()
testSubscription = Subscription(fileset=testFileset,
workflow=testWorkflow,
type="Processing",
split_algo="FileBased")
testSubscription.create()
myThread.transaction.commit()
return
def createWorkload(self, workloadName='Test'):
"""
_createTestWorkload_
        Creates a test workload for us to run on, holding the basic necessities.
"""
workload = testWorkload(workloadName)
rereco = workload.getTask("ReReco")
seederDict = {"generator.initialSeed": 1001, "evtgenproducer.initialSeed": 1001}
rereco.addGenerator("PresetSeeder", **seederDict)
taskMaker = TaskMaker(workload, os.path.join(self.testDir, 'workloadTest'))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
return workload
def getConfig(self):
"""
_getConfig_
Creates a common config.
"""
config = self.testInit.getConfiguration()
self.testInit.generateWorkDir(config)
# First the general stuff
config.section_("General")
config.General.workDir = os.getenv("TESTDIR", os.getcwd())
config.section_("Agent")
config.Agent.componentName = self.componentName
# Now the CoreDatabase information
# This should be the dialect, dburl, etc
config.section_("CoreDatabase")
config.CoreDatabase.connectUrl = os.getenv("DATABASE")
config.CoreDatabase.socket = os.getenv("DBSOCK")
config.component_("JobCreator")
config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'
# The log level of the component.
# config.JobCreator.logLevel = 'SQLDEBUG'
config.JobCreator.logLevel = 'INFO'
# maximum number of threads we want to deal
# with messages per pool.
config.JobCreator.maxThreads = 1
config.JobCreator.UpdateFromResourceControl = True
config.JobCreator.pollInterval = 10
# config.JobCreator.jobCacheDir = self.testDir
config.JobCreator.defaultJobType = 'processing' # Type of jobs that we run, used for resource control
config.JobCreator.workerThreads = 4
config.JobCreator.componentDir = self.testDir
config.JobCreator.useWorkQueue = True
config.JobCreator.WorkQueueParams = {'emulateDBSReader': True}
# We now call the JobMaker from here
config.component_('JobMaker')
config.JobMaker.logLevel = 'INFO'
config.JobMaker.namespace = 'WMCore.WMSpec.Makers.JobMaker'
config.JobMaker.maxThreads = 1
config.JobMaker.makeJobsHandler = 'WMCore.WMSpec.Makers.Handlers.MakeJobs'
# JobStateMachine
config.component_('JobStateMachine')
config.JobStateMachine.couchurl = os.getenv('COUCHURL', 'cmssrv52.fnal.gov:5984')
config.JobStateMachine.couchDBName = self.couchdbname
return config
def testVerySimpleTest(self):
"""
_VerySimpleTest_
Just test that everything works...more or less
"""
# return
myThread = threading.currentThread()
config = self.getConfig()
name = makeUUID()
nSubs = 5
nFiles = 10
workloadName = 'TestWorkload'
dummyWorkload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
testJobCreator = JobCreatorPoller(config=config)
# First, can we run once without everything crashing?
testJobCreator.algorithm()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), nSubs * nFiles)
# Count database objects
result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
self.assertEqual(len(result), nSubs * nFiles)
# Find the test directory
testDirectory = os.path.join(self.testDir, 'jobCacheDir', 'TestWorkload', 'ReReco')
# It should have at least one jobGroup
self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))
        # But no more than twenty
self.assertTrue(len(os.listdir(testDirectory)) <= 20)
groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')
# First job should be in here
listOfDirs = []
for tmpDirectory in os.listdir(testDirectory):
listOfDirs.extend(os.listdir(os.path.join(testDirectory, tmpDirectory)))
self.assertTrue('job_1' in listOfDirs)
self.assertTrue('job_2' in listOfDirs)
self.assertTrue('job_3' in listOfDirs)
jobDir = os.listdir(groupDirectory)[0]
jobFile = os.path.join(groupDirectory, jobDir, 'job.pkl')
self.assertTrue(os.path.isfile(jobFile))
f = open(jobFile, 'rb')
job = pickle.load(f)
f.close()
self.assertEqual(job.baggage.PresetSeeder.generator.initialSeed, 1001)
self.assertEqual(job.baggage.PresetSeeder.evtgenproducer.initialSeed, 1001)
self.assertEqual(job['workflow'], name)
self.assertEqual(len(job['input_files']), 1)
self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')
return
@attr('performance', 'integration')
def testProfilePoller(self):
"""
Profile your performance
You shouldn't be running this normally because it doesn't do anything
"""
name = makeUUID()
nSubs = 5
nFiles = 1500
workloadName = 'TestWorkload'
workload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
config = self.getConfig()
testJobCreator = JobCreatorPoller(config=config)
cProfile.runctx("testJobCreator.algorithm()", globals(), locals(), filename="testStats.stat")
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
time.sleep(10)
self.assertEqual(len(result), nSubs * nFiles)
p = pstats.Stats('testStats.stat')
p.sort_stats('cumulative')
p.print_stats(.2)
return
@attr('integration')
def testProfileWorker(self):
"""
Profile where the work actually gets done
You shouldn't be running this one either, since it doesn't test anything.
"""
name = makeUUID()
nSubs = 5
nFiles = 500
workloadName = 'TestWorkload'
workload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
config = self.getConfig()
configDict = {"couchURL": config.JobStateMachine.couchurl,
"couchDBName": config.JobStateMachine.couchDBName,
'jobCacheDir': config.JobCreator.jobCacheDir,
'defaultJobType': config.JobCreator.defaultJobType}
subs = [{"subscription": 1}, {"subscription": 2}, {"subscription": 3}, {"subscription": 4},
{"subscription": 5}]
testJobCreator = JobCreatorPoller(**configDict)
cProfile.runctx("testJobCreator.algorithm(parameters = input)", globals(), locals(), filename="workStats.stat")
p = pstats.Stats('workStats.stat')
p.sort_stats('cumulative')
p.print_stats(.2)
return
@attr('integration')
def testHugeTest(self):
"""
Don't run this one either
"""
myThread = threading.currentThread()
config = self.getConfig()
name = makeUUID()
nSubs = 10
nFiles = 5000
workloadName = 'Tier1ReReco'
dummyWorkload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.createJobCollection(name=name, nSubs=nSubs, nFiles=nFiles, workflowURL=workloadPath)
testJobCreator = JobCreatorPoller(config=config)
# First, can we run once without everything crashing?
startTime = time.time()
testJobCreator.algorithm()
stopTime = time.time()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), nSubs * nFiles)
print("Job took %f seconds to run" % (stopTime - startTime))
# Count database objects
result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
self.assertEqual(len(result), nSubs * nFiles)
return
def stuffWMBS(self, workflowURL, name):
"""
_stuffWMBS_
Insert some dummy jobs, jobgroups, filesets, files and subscriptions
into WMBS to test job creation. Three completed job groups each
containing several files are injected. Another incomplete job group is
also injected. Also files are added to the "Mergeable" subscription as
well as to the output fileset for their jobgroups.
"""
locationAction = self.daoFactory(classname="Locations.New")
locationAction.execute(siteName="s1", pnn="somese.cern.ch")
mergeFileset = Fileset(name="mergeFileset")
mergeFileset.create()
bogusFileset = Fileset(name="bogusFileset")
bogusFileset.create()
mergeWorkflow = Workflow(spec=workflowURL, owner="mnorman",
name=name, task="/TestWorkload/ReReco")
mergeWorkflow.create()
mergeSubscription = Subscription(fileset=mergeFileset,
workflow=mergeWorkflow,
split_algo="ParentlessMergeBySize")
mergeSubscription.create()
dummySubscription = Subscription(fileset=bogusFileset,
workflow=mergeWorkflow,
split_algo="ParentlessMergeBySize")
file1 = File(lfn="file1", size=1024, events=1024, first_event=0,
locations={"somese.cern.ch"})
file1.addRun(Run(1, *[45]))
file1.create()
file2 = File(lfn="file2", size=1024, events=1024, first_event=1024, locations={"somese.cern.ch"})
file2.addRun(Run(1, *[45]))
file2.create()
file3 = File(lfn="file3", size=1024, events=1024, first_event=2048, locations={"somese.cern.ch"})
file3.addRun(Run(1, *[45]))
file3.create()
file4 = File(lfn="file4", size=1024, events=1024, first_event=3072, locations={"somese.cern.ch"})
file4.addRun(Run(1, *[45]))
file4.create()
fileA = File(lfn="fileA", size=1024, events=1024, first_event=0, locations={"somese.cern.ch"})
fileA.addRun(Run(1, *[46]))
fileA.create()
fileB = File(lfn="fileB", size=1024, events=1024, first_event=1024, locations={"somese.cern.ch"})
fileB.addRun(Run(1, *[46]))
fileB.create()
fileC = File(lfn="fileC", size=1024, events=1024, first_event=2048, locations={"somese.cern.ch"})
fileC.addRun(Run(1, *[46]))
fileC.create()
fileI = File(lfn="fileI", size=1024, events=1024, first_event=0, locations={"somese.cern.ch"})
fileI.addRun(Run(2, *[46]))
fileI.create()
fileII = File(lfn="fileII", size=1024, events=1024, first_event=1024, locations={"somese.cern.ch"})
fileII.addRun(Run(2, *[46]))
fileII.create()
fileIII = File(lfn="fileIII", size=1024, events=1024, first_event=2048, locations={"somese.cern.ch"})
fileIII.addRun(Run(2, *[46]))
fileIII.create()
fileIV = File(lfn="fileIV", size=1024 * 1000000, events=1024, first_event=3072, locations={"somese.cern.ch"})
fileIV.addRun(Run(2, *[46]))
fileIV.create()
for fileObj in [file1, file2, file3, file4, fileA, fileB, fileC, fileI, fileII, fileIII, fileIV]:
mergeFileset.addFile(fileObj)
bogusFileset.addFile(fileObj)
mergeFileset.commit()
bogusFileset.commit()
return
def testTestNonProxySplitting(self):
"""
_TestNonProxySplitting_
Test and see if we can split things without a proxy.
"""
config = self.getConfig()
config.JobCreator.workerThreads = 1
name = makeUUID()
workloadName = 'TestWorkload'
workload = self.createWorkload(workloadName=workloadName)
workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload', 'WMSandbox', 'WMWorkload.pkl')
self.stuffWMBS(workflowURL=workloadPath, name=name)
testJobCreator = JobCreatorPoller(config=config)
testJobCreator.algorithm()
getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
result = getJobsAction.execute(state='Created', jobType="Processing")
self.assertEqual(len(result), 1)
result = getJobsAction.execute(state='Created', jobType="Merge")
self.assertEqual(len(result), 0)
return
def testCapResourceEstimates(self):
"""
_testCapResourceEstimates_
Test capResourceEstimates function to make sure the glideinwms
constraints are being properly considered.
"""
class JobGroup(object):
"""Dummy object holding a jobs attr full of jobs"""
def __init__(self):
self.jobs = []
constraints = {'MaxRequestDiskKB': 20971520, 'MinRequestDiskKB': 1048576,
'MaxWallTimeSecs': 162000, 'MinWallTimeSecs': 3600}
jobGroups = []
jobGroup = JobGroup()
jobGroup.jobs.append({'estimatedJobTime': None, 'estimatedDiskUsage': None})
jobGroup.jobs.append({'estimatedJobTime': 0, 'estimatedDiskUsage': 0})
jobGroup.jobs.append({'estimatedJobTime': 10000, 'estimatedDiskUsage': 10 * 1000 * 1000})
jobGroup.jobs.append({'estimatedJobTime': 200000, 'estimatedDiskUsage': 100 * 1000 * 1000})
jobGroups.append(jobGroup)
capResourceEstimates(jobGroups, constraints)
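        # Jobs with missing or zero estimates should be raised to the
        # configured minimums, while the oversized job should be capped
        # at the maximums.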
self.assertItemsEqual(jobGroup.jobs[0], {'estimatedJobTime': 3600, 'estimatedDiskUsage': 1048576})
self.assertItemsEqual(jobGroup.jobs[1], {'estimatedJobTime': 3600, 'estimatedDiskUsage': 1048576})
self.assertItemsEqual(jobGroup.jobs[2], {'estimatedJobTime': 10000, 'estimatedDiskUsage': 10 * 1000 * 1000})
self.assertItemsEqual(jobGroup.jobs[3], {'estimatedJobTime': 162000, 'estimatedDiskUsage': 20971520})
return
if __name__ == "__main__":
unittest.main()
|
py | 1a48f0ec892535f9637b88af7be18c6c504d6dc7 | from config import DB_PATH, SQLALCHEMY_DATABASE_URI
from db import tweets, page_views
from dummy import *
from datetime import datetime, timedelta, timezone
import json
import pandas as pd
import pytz
import SessionState
import streamlit as st
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# from bokeh.plotting import figure
# from bokeh.models import ColumnDataSource, CustomJS
# from bokeh.models import DataTable, TableColumn, HTMLTemplateFormatter
from dotenv import load_dotenv
load_dotenv(override=True)
from loguru import logger
from streamlit.report_thread import get_report_ctx
from telegram.utils.helpers import escape_markdown
local_timezone = pytz.timezone('America/Toronto')
def log_session_id_formatter(record):
if "session_id" in record["extra"]:
fmt = "<green>{time:YYYY-MM-DD at HH:mm:ss}</green> <blue>|{level: ^8}|</blue> <cyan>{module: ^10}:{function: ^15}:{line: >3}</cyan> |{extra[session_id]: >4}| - <level>{message}</level>"
else:
fmt = "<green>{time:YYYY-MM-DD at HH:mm:ss}</green> <blue>|{level: ^8}|</blue> <cyan>{module: ^10}:{function: ^15}:{line: >3}</cyan> - <level>{message}</level>"
return fmt + "\n{exception}"
logger.remove()
logger.add(sys.stdout, level="DEBUG", colorize=True, format=log_session_id_formatter, backtrace=True)
report_ctx = get_report_ctx()
province_index_dict = {
"ALL": 0,
"ON": 1,
"BC": 2,
"AB": 3,
"QC": 4,
"MB": 5,
"SK": 6,
"NB": 7,
"NS": 8,
"PEI": 9,
}
age_group_index_dict = {
"ANY": 0,
"18+": 1,
"30+": 2,
"40+": 3,
"50+": 4,
}
@st.cache(ttl=60)
def get_tweet_df():
logger.info(f"Reading Tweets from sqlite database")
# engine = create_engine(f'sqlite:///{DB_PATH}')
engine = create_engine(SQLALCHEMY_DATABASE_URI)
tweet_df = pd.read_sql_table('tweet', engine)
tweet_df['created_at'] = pd.to_datetime(tweet_df['created_at'], utc=True)
tweet_df = tweet_df.sort_values(by=['created_at'], ascending=False)
# tweet_df['created_at'] = pd.to_datetime(tweet_df['created_at']) \
# .dt.tz_localize('America/Toronto')
logger.info(f"Retrieved {len(tweet_df)} tweets")
return tweet_df
def insert_page_view(session_id, search_criteria):
# engine = create_engine(f'sqlite:///{DB_PATH}')
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine)
search_criteria_str = json.dumps(search_criteria)
with Session() as session:
ins = page_views.insert().values(
session_id=session_id,
search_criteria=search_criteria_str,
)
res = session.execute(ins)
session.commit()
current_session_id = report_ctx.session_id
with logger.contextualize(session_id=current_session_id):
logger.info(f"Started serving current page")
session_state = SessionState.get(province='ALL', age_group='ANY', city='', fsa='', keyword='',)
province_options = ("ALL", "ON", "BC", "AB", "QC", "MB", "SK", "NB", "NS", "PEI",)
age_group_options = ("ANY", "18+", "30+", "40+", "50+",)
st.set_page_config(page_title='Vaccine Updates (Canada)', layout='wide')
st.title('Vaccine Hunters Search')
st.write("""
### This website lets you search Vaccine Hunters tweets by province, age group and postal code.
[Vaccine Hunters](https://vaccinehunters.ca/)
[Vaccine Hunters Twitter](https://twitter.com/vaxhunterscan?lang=en)
""")
with st.beta_expander("Official Vaccine Sites"):
st.write("""
* [Canada Govt. Vaccine Site](https://www.canada.ca/en/public-health/services/diseases/coronavirus-disease-covid-19/vaccines.html)
* [Ontario Vaccine Site](https://covid-19.ontario.ca/book-vaccine/)
* [British Columbia Vaccine Site](https://www2.gov.bc.ca/gov/content/covid-19/vaccine/plan)
* [Quebec Vaccine Site](https://www.quebec.ca/en/health/health-issues/a-z/2019-coronavirus/progress-of-the-covid-19-vaccination)
* [Alberta Vaccine Site](https://www.alberta.ca/covid19-vaccine.aspx)
* [Manitoba Vaccine Site](https://www.gov.mb.ca/covid19/vaccine/index.html)
* [Saskatchewan Vaccine Site](https://www.saskatchewan.ca/covid19-vaccine)
* [Nova Scotia Vaccine Site](https://novascotia.ca/coronavirus/book-your-vaccination-appointment/)
* [New Brunswick Vaccine Site](https://www2.gnb.ca/content/gnb/en/corporate/promo/covid-19/nb-vaccine.html)
* [PEI Vaccine Site](https://www.princeedwardisland.ca/en/information/health-and-wellness/getting-covid-19-vaccine)
""")
tweet_df = get_tweet_df()
refresh = st.sidebar.button("Refresh Results")
if refresh:
logger.info("Refershing page")
st.caching.clear_cache()
query_params = st.experimental_get_query_params()
province_default = int(query_params["province"][0]) if "province" in query_params else 0
province = st.sidebar.selectbox(
"Your province?",
province_options,
index = province_default,
)
age_group_default = int(query_params["age_group"][0]) if "age_group" in query_params else 0
age_group = st.sidebar.selectbox(
"Your age group?",
age_group_options,
index = age_group_default,
)
city_default = query_params["city"][0] if "city" in query_params else ''
    city = st.sidebar.text_input("City or Region (Please ensure it is spelled correctly)", value=city_default)
fsa_default = query_params["fsa"][0] if "fsa" in query_params else ''
fsa = st.sidebar.text_input("FSA (First three characters of your postal code)", value=fsa_default)
keyword_default = query_params["keyword"][0] if "keyword" in query_params else ''
keyword = st.sidebar.text_input("Any specific keyword (eg pregnant, immuno-compromised)", value=keyword_default)
# session_state.age_group = age_group
# session_state.province = province
# session_state.city = city
# session_state.fsa = fsa
# session_state.keyword = keyword
st.experimental_set_query_params(
province=province_options.index(province),
age_group=age_group_options.index(age_group),
city=city,
fsa=fsa,
keyword=keyword,
)
st.sidebar.write("""
Contact: [[email protected]]([email protected])
Source Code: [Github Repo](https://github.com/riyadparvez/canada-vaccine-alerter)
Hosting is sponsored by: [Ukko Agro](https://ukko.ag/)
""")
search_criteria = {}
filtered_tweet_df = tweet_df
if province != 'ALL':
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['province'].str.contains(province, na=False, case=False)]
search_criteria['province'] = province
if age_group != 'ANY':
search_substr = age_group
if age_group == '30+':
search_substr = '18|30'
elif age_group == '40+':
search_substr = '18|30|40'
elif age_group == '50+':
search_substr = '18|30|40|50'
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['age_groups'].str.contains(search_substr, na=False, case=False)]
search_criteria['age_group'] = age_group
if len(fsa) > 0:
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['FSAs'].str.contains(fsa, na=False, case=False)]
search_criteria['fsa'] = fsa
if len(city) > 0:
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['cities'].str.contains(city, na=False, case=False)]
search_criteria['city'] = city
if len(keyword) > 0:
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['tweet_text'].str.contains(keyword, na=False, case=False)]
search_criteria['keyword'] = keyword
insert_page_view(current_session_id, search_criteria)
if filtered_tweet_df.empty:
search_substr = '|'.join([val for val in search_criteria.values()])
logger.info(f"Expanding search criteria 'keyword': {search_substr}")
filtered_tweet_df = tweet_df[tweet_df['tweet_text'].str.contains(search_substr, na=False, case=False)]
mask = filtered_tweet_df['province'].str.contains(province, na=False, case=False) | filtered_tweet_df['province'].isnull()
filtered_tweet_df = filtered_tweet_df[mask]
filtered_tweet_df = filtered_tweet_df.sort_values(by=['province', 'created_at',])
logger.warning(f"No results have been found for: {search_criteria}. Expanded search criteria.")
st.warning("""
#### We didn't find any results for your search criteria. You might still be eligible for vaccination.
#### We have expanded your search criteria to show you more matches. Please also look at other sources for vaccination opportunities.
""")
if len(search_criteria) > 20:
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['created_at'] > (datetime.now(timezone.utc) - timedelta(days=3))]
else:
filtered_tweet_df = filtered_tweet_df[filtered_tweet_df['created_at'] > (datetime.now(timezone.utc) - timedelta(days=7))]
if not filtered_tweet_df.empty:
logger.info(f"{len(filtered_tweet_df)} results found for: {search_criteria}")
st.caption(f"Found {len(filtered_tweet_df)} relevant tweets.")
# tweet_df['tweet_text'] = tweet_df.apply(lambda row: f"{row['tweet_text']}\nLink: (https://twitter.com/twitter/statuses/{row['tweet_id']})", axis=1)
filtered_tweet_df = filtered_tweet_df.replace({r'\s+$': '', r'^\s+': ''}, regex=True).replace(r'\n', ' ', regex=True)
filtered_tweet_df['tweet_text'] = \
filtered_tweet_df.apply(lambda row: f"[{escape_markdown(row['tweet_text'], version=2)}](https://twitter.com/twitter/statuses/{row['tweet_id']})", axis=1)
filtered_tweet_df['tweet_link'] = filtered_tweet_df['tweet_id'].map(lambda x: f"https://twitter.com/twitter/statuses/{x}")
filtered_tweet_df['cities'] = filtered_tweet_df['cities'].str.slice(1,-1)
filtered_tweet_df['cities'] = filtered_tweet_df['cities'].str.replace(r"'", '')
filtered_tweet_df['FSAs'] = filtered_tweet_df['FSAs'].str.slice(1,-1)
filtered_tweet_df['FSAs'] = filtered_tweet_df['FSAs'].str.replace(r"'", '')
filtered_tweet_df['age_groups'] = filtered_tweet_df['age_groups'].str.slice(1,-1)
filtered_tweet_df = filtered_tweet_df[['created_at', 'tweet_text', 'province', 'age_groups', 'cities', 'FSAs',]]
filtered_tweet_df = filtered_tweet_df.rename(
columns = {
'created_at': 'Time',
'tweet_text': 'Tweet',
'tweet_link': 'Link',
'province': 'Province',
'age_groups': 'Age Groups',
'cities': 'City/Region',
'FSAs': 'FSA',
}
)
filtered_tweet_df['Time'] = filtered_tweet_df['Time'].dt.tz_convert('US/Eastern')
filtered_tweet_df['Time'] = filtered_tweet_df['Time'].dt.strftime('%a %d %b %I:%M %p')
# st.write(filtered_tweet_df.to_markdown(tablefmt="grid"))
# tweet_df = tweet_df.set_index('Time')
# st.table(tweet_df)
st.write(filtered_tweet_df.to_markdown(index=False))
else:
logger.info(f"No Results found for: {search_criteria}")
st.warning("""
        #### We couldn't find any tweets from Vaccine Hunters matching your search criteria. It is still possible that you are eligible for vaccination and that there are vaccination opportunities available for you.
#### Please try searching other sources or try again later.
""")
st.caption(f"Last updated: {datetime.now(tz=local_timzeone).strftime('%a %d %b %I:%M %p')}. (EST)")
st.write("\n\n")
st.empty()
with st.beta_expander("Terms of Use"):
st.write("""
THE SOFTWARE AND PLATFORM IS PROVIDED ON AN ‘AS IS’ BASIS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. THE PLATFORM (VACCINEUPDATES.CA) MAKES NO WARRANTIES, REPRESENTATIONS OR CONDITIONS, EXPRESS OR IMPLIED, WRITTEN OR ORAL, ARISING BY STATUTE, OPERATION OF LAW, COURSE OF DEALING, USAGE OF TRADE OR OTHERWISE, REGARDING THE PLATFORM OR SERVICES. VACCINEUPDATES.CA (INCLUDING ITS AFFILIATES, LICENSORS, SUPPLIERS AND SUBCONTRACTORS) DOES NOT REPRESENT OR WARRANT THAT THE PLATFORM AND SERVICES WILL MEET ANY OR ALL OF USER’S PARTICULAR REQUIREMENTS, THAT THE PLATFORM WILL OPERATE ERROR-FREE OR UNINTERRUPTED OR THAT ALL ERRORS OR DEFECTS IN THE SERVICE CAN BE FOUND OR CORRECTED.
""")
logger.info(f"Finished serving current page")
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
if False:
st.markdown("""
<style>
.small-font {
font-size:10px !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<p class="small-font">Hello World !!</p>', unsafe_allow_html=True)
|
py | 1a48f0ff48600d1842c1c874e5671dba92b8e771 | #!/usr/bin/env python
# Python version 3.4+
import sys
import os
import re
import math
import requests
# Simple ranged download script. For those times when the other end just decides
# to close the file stream and you end up with partial files. This fixes
# that issue.
# -> Requests is required. Use 'pip install requests' to download the module.
# This download script is partially extracted from my Bandcamp downloader,
# Campdown.
# The first argument is the url to download the file from.
# The second argument is the optional output folder the file should be written to.
# If none is specified the folder this script is in will be used.
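# Example invocation (script and file names here are hypothetical):
#   python ranged_download.py "https://example.com/files/track.mp3" downloads/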
# Check that this file's main function is not being called from another file.
if __name__ == "__main__":
try:
# Fetch the program arguments and make sure that they are valid.
try:
url = sys.argv[1].replace("\"", "")
        except IndexError:
print("\nMissing required URL argument")
sys.exit(2)
if "http://" not in url and "https://" not in url:
print("\n%s is not a valid URL" % url)
sys.exit(2)
        # Get the output folder: the optional second argument if given,
        # otherwise the folder this script resides in.
        if len(sys.argv) > 2:
            folder = sys.argv[2].replace("\"", "")
        else:
            folder = os.path.split(os.path.abspath(
                __file__).replace("\\", "/"))[0] + "/"
        name = re.findall(r"(?=\w+\.\w{3,4}$).+", url)[0]
# Get the size of the remote file.
full_response = requests.get(url, stream=True)
total_length = full_response.headers.get("content-length")
# Open a file stream which will be used to save the output string
with open(folder + "/" + re.sub("[\\/:*?<>|]", "", name), "wb") as f:
            # Make sure that the printed string is compatible with the user's command line. Else, encode.
# This applies to all other print arguments throughout this file.
try:
print("Downloading: %s" % name)
except UnicodeEncodeError:
try:
print("Downloading: %s" % name.encode(
sys.stdout.encoding, errors="replace").decode())
except UnicodeDecodeError:
print("Downloading: %s" % name.encode(
sys.stdout.encoding, errors="replace"))
# If the file is empty simply write out the returned content from
# the request.
if total_length is None:
f.write(full_response.content)
else:
# Storage variables used while evaluating the already
# downloaded data.
dl = 0
total_length = int(total_length)
cleaned_length = int((total_length * 100) / pow(1024, 2)) / 100
block_size = 2048
for i in range(math.ceil(total_length / 1048576)):
response = requests.get(url, headers={
"Range": "bytes=" + str(i * 1048576) + "-" + str((i + 1) * (1048576) - 1)}, stream=True)
for chunk in response.iter_content(chunk_size=block_size):
# Add the length of the chunk to the download size and
# write the chunk to the file.
dl += len(chunk)
f.write(chunk)
# Display a loading bar based on the currently download
# filesize.
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s%s] %sMB / %sMB " % ("=" * done, ">", " " * (
50 - done), (int(((dl) * 100) / pow(1024, 2)) / 100), cleaned_length))
sys.stdout.flush()
# Insert a new line for formatting-OCD's sake.
print("\n")
except (KeyboardInterrupt):
print("Interrupt caught - exiting program...")
sys.exit(2)
|
py | 1a48f13ee71f10ffaf92822c385d016bd2f57d5d | ## Animal is-a object (yes, sort of confusing) look at the extra credit
class Animal(object):
pass
## Dog is-a Animal
class Dog(Animal):
def __init__(self, name):
        ## Dog has-a name
self.name = name
## Cat is-a Animal
class Cat(Animal):
def __init__(self, name):
        ## Cat has-a name
self.name = name
## Person is-a object
class Person(object):
def __init__(self, name):
        ## Person has-a name
self.name = name
## Person has-a pet of some kind
self.pet = None
## Employee is-a Person
class Employee(Person):
def __init__(self, name, salary):
        ## Employee has-a name set by Person's __init__ (the "strange magic" is super() calling the parent class constructor)
super(Employee, self).__init__(name)
        ## Employee has-a salary
self.salary = salary
## Fish is-a object
class Fish(object):
pass
## Salmon is-a Fish
class Salmon(Fish):
pass
## Halibut is-a Fish
class Halibut(Fish):
pass
## rover is-a Dog
rover = Dog("Rover")
## satan is-a Cat
satan = Cat("Satan")
## mary is-a Person
mary = Person("Mary")
## mary has-a pet, satan the Cat
mary.pet = satan
## frank is-a Employee and has-a salary of 120000
frank = Employee("Frank", 120000)
## frank has-a pet, rover the Dog
frank.pet = rover
## flipper is-a Fish
flipper = Fish()
## crouse is-a Salmon
crouse = Salmon()
## harry is-a Halibut
harry = Halibut()
|
py | 1a48f26352bdde8a7e49b929e7fd26aec716bace | #!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains unit tests of the :mod:`arkane.ess.gaussian` module.
"""
import os
import unittest
import numpy as np
import rmgpy.constants as constants
from external.wip import work_in_progress
from rmgpy.statmech import IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator, HinderedRotor
from arkane.ess.gaussian import GaussianLog
from arkane.exceptions import LogError
################################################################################
class GaussianLogTest(unittest.TestCase):
"""
Contains unit tests for the gaussian module, used for parsing Gaussian log files.
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.data_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'gaussian')
def test_check_for_errors(self):
"""
Uses Gaussian log files that had various errors
to test if errors are properly parsed.
"""
with self.assertRaises(LogError):
GaussianLog(os.path.join(self.data_path, 'l913.out'))
with self.assertRaises(LogError):
GaussianLog(os.path.join(self.data_path, 'l9999.out'))
with self.assertRaises(LogError):
GaussianLog(os.path.join(self.data_path, 'error_termination.out'))
@work_in_progress
def test_load_ethylene_from_gaussian_log_cbsqb3(self):
"""
Uses a Gaussian03 log file for ethylene (C2H4) to test that its
molecular degrees of freedom can be properly read.
"""
log = GaussianLog(os.path.join(self.data_path, 'ethylene.log'))
conformer, unscaled_frequencies = log.load_conformer()
e0 = log.load_energy()
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, HinderedRotor)]) == 0)
trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0]
rot = [mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)][0]
vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0]
t_list = np.array([298.15], np.float64)
self.assertAlmostEqual(trans.get_partition_function(t_list), 5.83338e6, delta=1e1)
self.assertAlmostEqual(rot.get_partition_function(t_list), 2.59622e3, delta=1e-2)
self.assertAlmostEqual(vib.get_partition_function(t_list), 1.0481e0, delta=1e-4)
self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -78.467452, 4)
self.assertEqual(conformer.spin_multiplicity, 1)
self.assertEqual(conformer.optical_isomers, 1)
def test_gaussian_energies(self):
"""
        Test parsing double hybrid, MP2, CCSD, and CCSD(T) energies from a Gaussian log.
"""
log_doublehybrid = GaussianLog(os.path.join(self.data_path, 'B2PLYP.LOG'))
log_mp2 = GaussianLog(os.path.join(self.data_path, 'UMP2_C_ATOM.LOG'))
log_ccsd = GaussianLog(os.path.join(self.data_path, 'UCCSD_C_ATOM.LOG'))
log_ccsdt = GaussianLog(os.path.join(self.data_path, 'UCCSDT_C_ATOM.LOG'))
log_qb3 = GaussianLog(os.path.join(os.path.dirname(os.path.dirname(__file__)),
'../examples/arkane/species/C2H5/', 'ethyl_cbsqb3.log'))
self.assertAlmostEqual(log_doublehybrid.load_energy() / constants.Na / constants.E_h, -0.40217794572194e+02,
delta=1e-6)
self.assertAlmostEqual(log_mp2.load_energy() / constants.Na / constants.E_h, -0.37504683723025e+02,
delta=1e-6)
self.assertAlmostEqual(log_ccsd.load_energy() / constants.Na / constants.E_h, -37.517151426,
delta=1e-6)
self.assertAlmostEqual(log_ccsdt.load_energy() / constants.Na / constants.E_h, -0.37517454469e+02,
delta=1e-6)
self.assertAlmostEqual(log_qb3.load_energy() / constants.Na / constants.E_h, -79.029798,
delta=1e-6)
def test_load_oxygen_from_gaussian_log(self):
"""
Uses a Gaussian03 log file for oxygen (O2) to test that its
molecular degrees of freedom can be properly read.
"""
log = GaussianLog(os.path.join(self.data_path, 'oxygen.log'))
conformer, unscaled_frequencies = log.load_conformer()
e0 = log.load_energy()
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, LinearRotor)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, HinderedRotor)]) == 0)
trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0]
rot = [mode for mode in conformer.modes if isinstance(mode, LinearRotor)][0]
vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0]
t_list = np.array([298.15], np.float64)
self.assertAlmostEqual(trans.get_partition_function(t_list), 7.11169e6, delta=1e1)
self.assertAlmostEqual(rot.get_partition_function(t_list), 7.13316e1, delta=1e-4)
self.assertAlmostEqual(vib.get_partition_function(t_list), 1.00037e0, delta=1e-4)
self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -150.3784877, 4)
self.assertEqual(conformer.spin_multiplicity, 3)
self.assertEqual(conformer.optical_isomers, 1)
@work_in_progress
def test_load_ethylene_from_gaussian_log_g3(self):
"""
Uses a Gaussian03 log file for ethylene (C2H4) to test that its
molecular degrees of freedom can be properly read.
"""
log = GaussianLog(os.path.join(self.data_path, 'ethylene_G3.log'))
conformer, unscaled_frequencies = log.load_conformer()
e0 = log.load_energy()
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)]) == 1)
self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, HinderedRotor)]) == 0)
trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0]
rot = [mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)][0]
vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0]
t_list = np.array([298.15], np.float64)
self.assertAlmostEqual(trans.get_partition_function(t_list), 5.83338e6, delta=1e1)
self.assertAlmostEqual(rot.get_partition_function(t_list), 2.53410e3, delta=1e-2)
self.assertAlmostEqual(vib.get_partition_function(t_list), 1.0304e0, delta=1e-4)
self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -78.562189, 4)
self.assertEqual(conformer.spin_multiplicity, 1)
self.assertEqual(conformer.optical_isomers, 1)
def test_load_symmetry_and_optics(self):
"""
        Uses a Gaussian03 log file for oxygen (O2) to test that its
        symmetry number and optical isomers can be properly read.
"""
log = GaussianLog(os.path.join(self.data_path, 'oxygen.log'))
optical, symmetry, _ = log.get_symmetry_properties()
self.assertEqual(optical, 1)
self.assertEqual(symmetry, 2)
conf = log.load_conformer()[0]
self.assertEqual(conf.optical_isomers, 1)
found_rotor = False
for mode in conf.modes:
if isinstance(mode, LinearRotor):
self.assertEqual(mode.symmetry, 2)
found_rotor = True
self.assertTrue(found_rotor)
def test_load_scan_angle(self):
"""
Ensures proper scan angle found in Gaussian scan job
"""
log = GaussianLog(os.path.join(self.data_path, 'isobutanolQOOH_scan.log'))
self.assertAlmostEqual(log._load_scan_angle(), 10.0)
def test_load_number_scans(self):
"""
        Ensures the proper number of scan steps is found in a Gaussian scan job
"""
log = GaussianLog(os.path.join(self.data_path, 'isobutanolQOOH_scan.log'))
self.assertAlmostEqual(log._load_number_scans(), 36)
def test_load_scan_with_freq(self):
"""
        Ensures that the number of energies is correct for a hindered rotor scan that includes a frequency calculation
"""
log = GaussianLog(os.path.join(self.data_path, 'hr_scan_with_freq.log'))
self.assertAlmostEqual(log._load_number_scans(), 36)
self.assertAlmostEqual(log._load_scan_angle(), 10.0)
vlist, _ = log.load_scan_energies()
self.assertEqual(len(vlist), 37)
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
py | 1a48f2d067bb5395a1a39bad62f263bcde55bc84 | from io import StringIO
import os
import numpy as np
import pandas as pd
from .. import fem_attribute
class ListStringSeries():
def __init__(self, list_string_series):
self._list_string_series = list_string_series
return
def __len__(self):
return len(self._list_string_series)
def __getitem__(self, key):
if isinstance(key, int):
return self._list_string_series[key]
elif isinstance(key, list):
return [self[i] for i in key]
else:
raise ValueError(f"Unexpected key: {key}")
def strip(self):
return [s.strip() for s in self]
def expand_include(self, pattern, base_name):
return [s.expand_include(pattern, base_name) for s in self]
class StringSeries(pd.Series):
def __init__(self, *args, **kw):
if len(args) == 0 or len(args[0]) == 0:
kw['dtype'] = object
super().__init__(*args, **kw)
@property
def _constructor(self):
return StringSeries
@classmethod
def read_file(cls, file_name, *, pattern_ignore=None):
"""Read file and convert to numpy string array.
Args:
file_name: String of file name.
            pattern_ignore: String pattern used to ignore unnecessary lines,
                e.g. comments.
Returns:
            StringSeries object. Each component corresponds to each line of
the input file.
"""
print(f"Reading file: {file_name}")
s = pd.read_csv(
file_name, header=None, index_col=None, sep='@', dtype=str)[0]
# sep='@' because don't want to separate
if pattern_ignore is None:
return cls(s)
else:
return cls(s).find_match(
pattern_ignore, negative_match=True)
@classmethod
def read_files(cls, file_names, *, pattern_ignore=None, separate=False):
"""Read files.
Args:
file_names: Array of strings indicating file names.
            pattern_ignore: String pattern used to ignore unnecessary lines,
                e.g. comments.
separate: bool
If True, return separated contents, namely, ListStringSeries
object.
Returns:
            StringSeries object. Each component corresponds to each line of
input files (contents are concatenated).
"""
if separate:
list_string_series = ListStringSeries([
cls.read_file(file_name, pattern_ignore=pattern_ignore)
for file_name in file_names])
if len(list_string_series) == 1:
return list_string_series[0]
else:
return list_string_series
else:
return cls(pd.concat([
cls.read_file(file_name, pattern_ignore=pattern_ignore)
for file_name in file_names]))
@classmethod
def read_array(cls, _array, *, delimiter=',', str_format=None):
"""Read array to make StringSeries object.
Args:
array: Ndarray or list of NDarray to make StringSeries object.
delimiter: String indicating delimiter to connect components in
                a row (default: ',').
str_format: Format string to be passed to numpy.savetxt.
Returns: StringSeries object after reading arrays.
"""
array = np.asarray(_array)
if str_format is None and 'float' in str(array.dtype):
str_format = '%.8E'
if len(array.shape) == 1:
if str_format is None:
try:
str_array = array.astype(str)
return cls(str_array)
except ValueError:
return cls.read_array(
array[:, None], delimiter=delimiter,
str_format=str_format)
else:
sio = StringIO()
np.savetxt(sio, array, fmt=str_format)
return cls(sio.getvalue().split('\n')[:-1])
elif len(array.shape) == 2 and array.shape[1] == 1:
if str_format is None:
try:
converted_array = array.astype(str)
# Array can be converted to other types
return cls(converted_array[:, 0])
except ValueError:
                    # Array is really an object array
return cls(np.array([
'\n'.join(delimiter.join(a) for a in arr.astype(str))
for arr in array[:, 0]
]))
else:
sio = StringIO()
np.savetxt(sio, array[:, 0], fmt=str_format)
return cls(sio.getvalue().split('\n')[:-1])
elif len(array.shape) > 2:
raise ValueError(f"Too high dimensions: {array.shape}")
else:
pass
a0 = array[:, 0]
if str_format is None:
s = cls(a0.astype(str))
for a in array[:, 1:].T:
s = s.connect(a.astype(str))
else:
sio = StringIO()
np.savetxt(sio, a0, fmt=str_format)
s = cls(sio.getvalue().split('\n')[:-1])
for a in array[:, 1:].T:
sio = StringIO()
np.savetxt(sio, a, fmt=str_format)
s = s.connect(sio.getvalue().split('\n')[:-1])
return s
@classmethod
def connect_all(cls, list_data, delimiter=',', str_format=None):
if len(list_data) == 0:
return cls()
if str_format is None:
str_format = [None] * len(list_data)
elif isinstance(str_format, str):
str_format = [str_format] * len(list_data)
if len(list_data) != len(str_format):
raise ValueError(
                'When str_format is list, the length should be '
                'the same as that of list_data '
                f"({len(str_format)} vs {len(list_data)})")
s = cls.read_array(list_data[0], str_format=str_format[0])
for d, f in zip(list_data[1:], str_format[1:]):
s = s.connect(
cls.read_array(d, str_format=f), delimiter=delimiter)
return s
@classmethod
def concat(cls, list_data, axis=0):
return cls(pd.concat(list_data, axis=axis))
def to_header_data(self, pattern):
matches = self.str.match(pattern).values
headers = self[matches]
match_indices = np.concatenate([np.where(matches)[0], [len(self)]])
list_indices = [
range(i1+1, i2) for i1, i2
in zip(match_indices[:-1], match_indices[1:])]
return HeaderData(headers, list_indices, data=self)
# header_dict = {
# header: self[i1+1:i2] for header, i1, i2
# in zip(headers, match_indices[:-1], match_indices[1:])}
# return HeaderData(header_dict)
def strip(self):
return self.str.strip()
def extract_captures(self, pattern, *, convert_values=False):
captures = self.str.extract(pattern, expand=False)
captures = captures[~pd.isnull(captures)]
if convert_values:
return captures.values
else:
return captures
def find_match(self, pattern, *, allow_multiple_matches=True,
convert_values=False, negative_match=False):
"""Find match to the specified pattern.
Args:
pattern: Pattern to be used for matching.
allow_multiple_matches: True to accept several matches.
(Default = True)
            convert_values: Bool, [False]
                Flag to convert the matched StringSeries to ndarray values.
            negative_match: Bool, [False]
                If True, return lines that do not match the pattern.
Returns:
StringSeries or ndarray of matches.
"""
if negative_match:
match = self[~self.str.contains(pattern)]
else:
match = self[self.str.contains(pattern)]
if not allow_multiple_matches and len(match) > 1:
raise ValueError(f"{len(match)} matches found. Expected 1.")
if convert_values:
return match.values
else:
return match
def expand_include(self, pattern, base_name):
"""Expand data like 'include' statement. Expanded data is concatenated
at the end of the non-expanded data.
Args:
pattern: Pattern showing include statement. Include file should be
captured with the first expression.
base_name: Directory name of the include file location.
Returns:
StringSeries object after expansion.
"""
captures = self.extract_captures(pattern)
include_files = [os.path.join(base_name, c) for c in captures]
if len(include_files) == 0:
return self
include_ss = StringSeries.read_files(include_files)
return pd.concat([self, include_ss], ignore_index=True)
def to_fem_attribute(self, name, id_column, slice_data_columns, *,
data_type=float, delimiter=',',
data_unit='unit_unknown', generate_id2index=False):
"""Generate FEMAttribute object with parsing the series.
Args:
name: String indicating name of the attribute.
id_column: Int indicating the column of ids.
slice_data_columns: Slice object indicating the columns of data.
data_type: Type of the data (default: float)
delimiter: String of delimiter. (default: ',')
data_unit: String indicating unit of data.
(default: 'unit_unknown')
generate_id2index: bool
If True, generate pandas.DataFrame of IDs and indices.
Returns:
femio.FEMAttribute object.
"""
df = self.str.split(delimiter, expand=True)
ids = df.values[:, id_column].astype(float).astype(int)
data = df.values[:, slice_data_columns].astype(data_type)
return fem_attribute.FEMAttribute(
name, ids, data, data_unit=data_unit,
generate_id2index=generate_id2index)
def to_values(
self, delimiter=',', data_type=float, to_rank1=False,
until_column=None):
"""Delimit StringLines object with the specified delimiter to output
ndarray of the specified data_type.
Args:
delimiter: String of delimiter (default: ',').
data_type: Type of output data (default: float).
to_rank1: Boolean to control output (True: rank-1, False: rank-2,
default: False)
until_column: int, optional, [None]
Read until the specified column.
Returns:
Ndarray of the specified data_type.
"""
data = self.delimit(delimiter).astype(data_type)[:, :until_column]
# except ValueError:
# raise ValueError(self)
if to_rank1:
return np.concatenate(data)
else:
return data
def delimit(self, delimiter=','):
"""Delimit StringLines object with the specified delimiter to output
rank-2 ndarray of strings.
Args:
delimiter: String of delimiter (default: ',').
Returns:
rank-2 ndarray of string.
"""
return self.str.split(delimiter, expand=True).values
def split_vertical(self, index_cut, delimiter=','):
"""Split StringSeries object vertically.
Args:
index_cut: Index (= start index of 2nd obj) to cut the StringLines.
Return:
2-tuple of DataFrame objects after splitting.
"""
if len(self) == 0:
return (pd.DataFrame([]), pd.DataFrame([]))
if index_cut == 0:
pattern = f"([^{delimiter}]*){delimiter}(.*)"
else:
pattern = \
f"((?:[^{delimiter}]*{delimiter}){{{index_cut - 1}}}" \
+ f"[^{delimiter}]*){delimiter}(.*)"
df_split = self.str.extract(pattern, expand=True)
return (StringSeries(df_split[0]), StringSeries(df_split[1]))
def split_vertical_all(self, delimiter=','):
"""Split StringSeries object vertically. Output will be n StringSeries
objects.
        Args:
            delimiter: String of delimiter (default: ',').
        Return:
            n-tuple of StringSeries objects after splitting.
"""
if len(self) == 0:
return (StringSeries([]), )
        delimited_data = self.delimit(delimiter)
        return [StringSeries(d.T) for d in delimited_data.T]
def connect(self, other, delimiter=','):
"""Connect two StringSeries objects with specified delimiter.
Lengths of two objects should be the same.
Args:
other: Other StringSeries object to be connected.
delimiter: String to appear at the connection.
Return:
StringSeries object after connection.
"""
if len(other) == 0:
return self
if len(self) != len(other):
raise ValueError('Dimension different: {} vs {}'.format(
len(self), len(other)))
return StringSeries(self.str.cat(
StringSeries(other).values, sep=delimiter, join='left'))
def indices_match_clusters(self, pattern, *, negative_match=False):
"""Make cluster of indices of matches. Cluster means a group with
continuous indices.
Args:
pattern: Pattern to be used for matching.
Returns:
list of ndarrays containing indices of each cluster.
"""
indices_matches = self.indices_matches(
pattern, negative_match=negative_match)
diff_ind = np.diff(indices_matches)
separation_indices = [i + 1 for i, d in enumerate(diff_ind) if d > 1]
start_indices = [0] + separation_indices
stop_indices = separation_indices + [len(indices_matches)]
return [indices_matches[i1:i2] for i1, i2
in zip(start_indices, stop_indices)]
def indices_matches(self, pattern, *, negative_match=False):
"""Return indices of matched lines.
Args:
pattern: Pattern to be used for matching.
Returns:
Ndarray of ints indicating indices of matched lines.
"""
matches = self.astype(str).str.contains(pattern)
if negative_match:
matches = ~matches
if np.all(~matches):
raise ValueError('No match found for: {}'.format(pattern))
return np.array(range(len(matches)))[matches]
def to_dict_fem_attributes(self, names, component_nums,
data_units=None, delimiter=','):
"""Generate dict of FEMAttribute objects with parsing the lines.
Args:
names: List of strings indicating names of the attributes.
component_nums: List of ints indicating # of components of each
attributes.
data_units: List of strings indicating unit of data.
(default: 'unit_unknown')
Returns:
Dict with key = name, value = fem.FEMAttribute.
"""
if data_units is None:
data_units = ['unit_unknown' for _ in names]
nums = np.concatenate([[0], np.cumsum(component_nums)]) + 1
ranges = [range(n1, n2) for n1, n2 in zip(nums[:-1], nums[1:])]
return {name: self.to_fem_attribute(
name, 0, r, delimiter=delimiter, data_unit=unit)
for name, r, unit in zip(names, ranges, data_units)}
class HeaderData():
def __init__(self, headers, list_indices, data):
if len(headers) != len(list_indices):
raise ValueError(
f"Length different: {len(headers)} vs {len(list_indices)}")
self.dict = data
self.headers = headers
self.list_indices = np.array([
np.array(indices) for indices in list_indices], dtype=object)
self.data = data
def extract_headers(self, key):
return self.headers.find_match(key)
def extract_data(self, key, *, concatenate=True):
indices = self.headers.str.contains(key)
if not np.any(indices):
return StringSeries([])
if concatenate:
concatenated_indices = np.concatenate(
self.list_indices[indices])
return self.data.iloc[concatenated_indices]
else:
return [self.data.iloc[index]
for index in self.list_indices[indices]]
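# Illustrative usage sketch (an addition, not part of the original module):
# builds comma-separated lines from numpy arrays with the classes above and
# parses them back. The array values below are made up for demonstration.
if __name__ == '__main__':
    example_ids = np.array([1, 2, 3])
    example_values = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
    # Produces lines such as "1,1.00000000E-01,2.00000000E-01"
    series = StringSeries.connect_all([example_ids, example_values], delimiter=',')
    # Split off the ID column, then parse the remaining columns as floats
    _, data_part = series.split_vertical(1)
    print(data_part.to_values(delimiter=','))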
|
py | 1a48f33d0408a107a324ae3cc6ce49b433e27791 | # -*- coding: utf-8 -*-
import requests, json
import numpy as np
import scipy.interpolate as si
from scipy.optimize import brentq
from functools import partial
_error_msg = {
1: 'Parameter t must be a list or an array that represents knot vector.',
2: 'Method parameter must be one of: "interp", "smooth", "lsq".'
}
def LoadSpline(curve_id_or_url):
url_split = curve_id_or_url.split("/")
if len(url_split) > 1:
url = curve_id_or_url
else:
curve_id = url_split[-1]
if "spl_" not in curve_id or len(curve_id) != 16:
raise ValueError("Wrong curve id was specified")
url = "https://splinecloud.com/api/curves/id/{}".format(curve_id)
response = requests.get(url)
curve = json.loads(response.content)
curve_params = curve['spline']
t = np.array(curve_params['t'])
c = np.array(curve_params['c'])
w = curve_params['w']
tcck = t, c[:, 0], c[:, 1], curve_params['k']
return ParametricUnivariateSpline.from_tcck(tcck)
class PPolyInvertible(si.PPoly):
"""Piecewise polynomial with ability to evaluate inverse dependency x(y)"""
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
# self = super(PPolyInvertible, cls).construct_fast(c, x, extrapolate=extrapolate, axis=axis)
self = super(PPolyInvertible, cls).construct_fast(
c, x, extrapolate=extrapolate)
self.k = len(self.c) - 1
self.powers = np.arange(self.k, -1, -1)
self.intervals = self._form_intervals(self.x)
# self.powers = np.arange(self.k, 0, -1)
return self
@classmethod
def from_splinefunc(cls, spline):
self = cls.from_spline(spline.tck)
self.project_intervals(spline)
return self
def eval_oninterval(self, n, numpoints=50):
coeffs = self.c.T[n + self.k]
tbreak = self.x[n + self.k]
a = self.intervals[n][0]
b = self.intervals[n][1]
tpoints = np.linspace(a, b, numpoints)
ppoints = np.zeros(len(tpoints))
i = 0
for t in tpoints:
ppoints[i] = self.eval_poly(t, coeffs, tbreak)
i += 1
return ppoints
def _get_poly(self, t, n, xvalue=0):
coeffs = self.c.T[n + self.k]
tbreak = self.x[n + self.k]
poly = self.eval_poly(t, coeffs, tbreak)
return poly - xvalue
def eval_poly(self, t, coeffs, tbreak):
# poly = coeffs[0]*(t - tbreak)**3 + coeffs[1]*(t - tbreak)**2 + coeffs[2]*(t - tbreak) + coeffs[3]
poly = 0
for c, p in zip(coeffs, self.powers):
poly += c*(t - tbreak)**p
return poly
def _get_interval(self, coord, intervals):
i = 0
for interval in intervals:
if coord >= interval[0] and coord <= interval[1]:
return i
else:
i += 1
return None
def _form_intervals(self, breaks):
# n = len(breaks) - 1
n = len(breaks) - 2*self.k - 1
intervals = np.zeros((n, 2))
i = self.k
for interval in intervals:
interval[0], interval[1] = breaks[i], breaks[i + 1]
i += 1
return intervals
def project_intervals(self, sf):
breaks = sf(self.x)
self.pintervals = self._form_intervals(breaks)
def _check_monotonous(self, intervals):
check = True
for interval in intervals:
if interval[1] < interval[0]:
check = False
break
return check
def evalinv(self, x):
pinterval = self._get_interval(x, self.pintervals)
if pinterval is not None:
interval = self.intervals[pinterval]
t = brentq(partial(self._get_poly, n=pinterval, xvalue=x),
interval[0], interval[1])
return t
else:
return None
class ParametricUnivariateSpline(object):
"""
One-dimensional parametric spline fit to a given set of data points.
Fits a spline x, y = spl(t) of degree `k` to the provided `x`, `y` data.
If fitting method is set to "interp" spline will interpolate
through all data points.
If fitting method is set to "smooth" then normalized smoothing
factor sn will be used to choose the number of knots.
Regular smoothing factor s used by underlying spline functions is evaluated as:
s = sn*sum((y_data[i])**2)
If fitting method is set to "lsq" and internal knot vector t is not specified
then uniform knot vector of length nk will be used to create least squares
spline approximation.
"""
def __init__(self, x_data, y_data, t=None, method="interp",
sn=None, k=3, w=None, nk=3, bbox=[None]*2):
self.x_data = x_data
self.y_data = y_data
self.k = k
self.data_len = len(self.x_data)
self.xmax, self.xmin = max(x_data), min(x_data)
self.nk = nk
if w is None:
w_ = np.ones(self.data_len)
else:
w_ = np.array(w)
## sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
# sscale = sum( (y_data[i])**2 for i in range(self.data_len))
if sn is not None:
spl_smax = si.LSQUnivariateSpline(
x_data, y_data, [], k=k, w=w, bbox=bbox)
s_data = [spl_smax(d) for d in x_data]
smax = sum((w_[i]*(y_data[i] - s_data[i]))**2
for i in range(self.data_len))
s = sn*smax
else:
s = None
if method == "interp":
# self.splinefunc = si.InterpolatedUnivariateSpline(x_data, y_data)
self.splinefunc = si.UnivariateSpline(
x_data, y_data, k=k , s=0.0, w=w, bbox=bbox)
elif method == "smooth":
self.splinefunc = si.UnivariateSpline(
x_data, y_data, k=k , s=s, w=w, bbox=bbox)
elif method == "lsq":
if t is None:
knots = self._uniform_knotvector(self.nk)
elif len(t) > 0:
knots = t
else:
                raise ValueError(_error_msg[1])
self.splinefunc = si.LSQUnivariateSpline(
x_data, y_data, knots, k=k, w=w, bbox=bbox)
else:
            raise ValueError(_error_msg[2])
knots = self.splinefunc.get_knots()
self.knots = self._form_knotvector(knots)
        self.knots_norm = self._normalize_knotvector(d=self.data_len)
# self.knots_norm = self._normalize_knotvector(self.knots) #newfix
self.coeffs_x = self._get_controlpoints(self.knots)
self.coeffs_y = self.splinefunc.get_coeffs()
self.coeffs_t = self._get_controlpoints(self.knots_norm)
self._build_splines(self.coeffs_x, self.coeffs_y)
self._get_ppolyrep()
@classmethod
def from_tcck(cls, tcck):
"""Construct a parametric spline object from given tcck"""
self = cls.__new__(cls)
t, cx, cy, k = tcck
self.k = k
self.knots = t
self.knots_norm = self._normalize_knotvector()
self.coeffs_x = cx
self.coeffs_y = cy
self._build_splines(self.coeffs_x, self.coeffs_y)
self._get_ppolyrep()
return self
def __call__(self, tpoints):
x_points = self.spline_x(tpoints)
y_points = self.spline_y(tpoints)
return x_points, y_points
def eval(self, x):
if hasattr(x, '__iter__'):
t = np.array([self.spline_x.ppoly.evalinv(xi) for xi in x])
return self.spline_y.ppoly(t)
else:
t = self.spline_x.ppoly.evalinv(x)
return self.spline_y.ppoly(t)
def get_polypoints(self, n):
xpoints = self.spline_x.ppoly.eval_oninterval(n)
ypoints = self.spline_y.ppoly.eval_oninterval(n)
return xpoints, ypoints
def _get_ppolyrep(self):
self.spline_x.ppoly = PPolyInvertible.from_splinefunc(self.spline_x)
self.spline_y.ppoly = PPolyInvertible.from_splinefunc(self.spline_y)
def polyrep(self, tpoints):
return self.spline_x.ppoly(tpoints), self.spline_y.ppoly(tpoints)
def _build_splines(self, coeffs_x, coeffs_y):
tck_x = self.knots_norm, coeffs_x, self.k
tck_y = self.knots_norm, coeffs_y, self.k
self.spline_x = si.UnivariateSpline._from_tck(tck_x)
self.spline_y = si.UnivariateSpline._from_tck(tck_y)
self.spline_x.tck = tck_x
self.spline_y.tck = tck_y
def _form_knotvector(self, knots):
knots_full = np.concatenate(
([knots[0]]*self.k, knots, [knots[-1]]*self.k ))
return knots_full
def _normalize_knotvector(self, knots=None, d=1.0):
if knots is None: knots = self.knots
num_knots = len(knots)
ka = (knots[-1] - knots[0]) / d
knots_norm = np.empty(num_knots)
for i in range(num_knots):
knots_norm[i] = d - ((knots[-1] - knots[i])) / ka
return knots_norm
def _get_controlpoints(self, knots):
n = len(knots) - 1 - self.k
cpoints = np.empty(n)
for i in range(n):
tsum = 0
for j in range(1, self.k + 1):
tsum += knots[i + j]
cpoints[i] = tsum/float(self.k)
return cpoints
def _uniform_knotvector(self, nk):
if nk == 0:
return []
elif nk == 1:
return [(self.xmax - self.xmin) / 2.0 + self.xmin]
else:
knot_offset = float(self.xmax - self.xmin) / nk
# ks = self.xmin + knotdist
# ke = self.xmax - knotdist
knots = np.linspace(self.xmin, self.xmax, nk+2)
knots = knots[1:-1]
# knots = np.linspace(knot_offset, self.xmax-knot_offset, nk-2)
return knots
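# Illustrative usage sketch (an addition, not part of the original module):
# fits a parametric spline to a small synthetic data set with the class above.
# The sample points are made up for demonstration.
if __name__ == '__main__':
    x_sample = np.linspace(0.0, 1.0, 20)
    y_sample = np.sin(2.0 * np.pi * x_sample)
    spl = ParametricUnivariateSpline(x_sample, y_sample, method="interp")
    # Evaluate both coordinates at parametric positions t
    xs, ys = spl(np.linspace(0.0, spl.knots_norm[-1], 5))
    print(xs, ys)
    # Evaluate y(x) through the invertible piecewise-polynomial representation
    print(spl.eval(0.25))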
|
py | 1a48f3629142a8f3c4a7c7ace35a1796b4c9f232 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""drop_user_and_chart
Revision ID: cf5dc11e79ad
Revises: 41f5f12752f8
Create Date: 2019-01-24 15:30:35.834740
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'cf5dc11e79ad'
down_revision = '41f5f12752f8'
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
    # We previously had a KnownEvent table, but we deleted the table without
    # a down migration to remove it (so we didn't delete anyone's data if they
    # happened to be using the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
    if 'known_event' in inspector.get_table_names() and conn.dialect.name != 'sqlite':
op.drop_constraint('known_event_user_id_fkey', 'known_event')
op.drop_table("chart")
op.drop_table("users")
def downgrade(): # noqa: D103
conn = op.get_bind()
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(255)),
sa.Column('superuser', sa.Boolean(), default=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
if conn.dialect.name == 'mysql':
conn.execute("SET time_zone = '+00:00'")
op.alter_column(table_name='chart', column_name='last_modified', type_=mysql.TIMESTAMP(fsp=6))
else:
if conn.dialect.name in ('sqlite', 'mssql'):
return
if conn.dialect.name == 'postgresql':
conn.execute("set timezone=UTC")
op.alter_column(table_name='chart', column_name='last_modified', type_=sa.TIMESTAMP(timezone=True))
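# Note (an addition, not part of the original migration): in practice this
# revision is applied and reverted through Alembic rather than by calling
# upgrade()/downgrade() directly, e.g.
#   alembic upgrade cf5dc11e79ad
#   alembic downgrade 41f5f12752f8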
|
py | 1a48f5906f3eff8e15b24ca800b60a668b7fef6a | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gaussian """
import unittest
from test import QiskitMachineLearningTestCase
from test.datasets import get_deprecated_msg_ref
import warnings
import numpy as np
from qiskit_machine_learning.datasets import gaussian
class TestGaussian(QiskitMachineLearningTestCase):
"""Gaussian tests."""
def test_gaussian(self):
"""Gaussian test."""
with warnings.catch_warnings(record=True) as c_m:
warnings.simplefilter("always")
training_features, training_labels, test_features, test_labels = gaussian(
training_size=20, test_size=10, n=2, plot_data=False
)
with self.subTest("Test training_features"):
np.testing.assert_array_equal(training_features.shape, (40, 2))
with self.subTest("Test training_labels1"):
np.testing.assert_array_equal(training_labels.shape, (40, 2))
with self.subTest("Test training_labels2"):
np.testing.assert_array_equal(np.sum(training_labels, axis=0), np.array([20, 20]))
with self.subTest("Test training_labels3"):
np.testing.assert_array_equal(np.sum(training_labels, axis=1), np.ones(40))
with self.subTest("Test features.shape1"):
np.testing.assert_array_equal(test_features.shape, (20, 2))
with self.subTest("Test features.shape2"):
np.testing.assert_array_equal(test_features.shape, (20, 2))
with self.subTest("Test test_labels1"):
np.testing.assert_array_equal(np.sum(test_labels, axis=0), np.array([10, 10]))
with self.subTest("Test test_labels2"):
np.testing.assert_array_equal(np.sum(test_labels, axis=1), np.ones(20))
with self.subTest("Test deprecation msg"):
msg = str(c_m[0].message)
self.assertEqual(msg, get_deprecated_msg_ref("gaussian"))
if __name__ == "__main__":
unittest.main()
|
py | 1a48f608daab90d40fb3ba660e2a8dea44acb568 | NAME = "unicef-locations"
VERSION = __version__ = "3.1"
|
py | 1a48f610e7302f2198b5f866c8c97b8994b4f968 | '''
Contains all of the test code to make sure the code in `MAPLEAF` is running properly.
Directory structure mirrors that of MAPLEAF, with additional data directories.
All test/test_XXXX modules contain unit testing code for MAPLEAF/XXXX.
Regression/Validation test cases are run by code in `test.V&V`
Test/Example simulation definitions are in MAPLEAF/Examples/Simulations
Test/Example motor definitions are in MAPLEAF/Examples/Motors
Speed Tests of different simulator components are in `test.speedTests`
Wind Data for Suffield is in MAPLEAF/Examples/Wind
''' |
py | 1a48f62ce4a4fe8c9f735887266ba718003b3427 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# this allows to use the readthedocs theme also locally
import sphinx_rtd_theme
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'PyAero'
copyright = '2021, Andreas Ennemoser'
author = 'Andreas Ennemoser'
# The short X.Y version
version = '2.0'
# The full version, including alpha/beta/rc tags
release = 'v2.0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyAerodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyAero.tex', 'PyAero Documentation',
'Andreas Ennemoser', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyaero', 'PyAero Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyAero', 'PyAero Documentation',
author, 'PyAero', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
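# Note (an addition, not part of the original configuration): with this
# conf.py in the documentation source directory, the HTML docs are typically
# built with a standard Sphinx invocation such as
#   sphinx-build -b html <source dir> <build dir>
# where the actual directory names depend on the project layout.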
|
py | 1a48f760b55c1b0e58e5055c86e6a0b83bde4e11 | import math
import random
# random
x = 10
y = 50
print(random.randrange(x, y))
# math
num1 = 234.01
num2 = 6
num3 = -27.01
print("The smallest integer greater than or equal to num1,",
num1, ":", math.ceil(num1))
print("The largest integer smaller than or equal to num1,",
num1, ":", math.floor(num1))
print("The factorial of num2,", num2, ":", math.factorial(num2))
print("The absolute value of num3", num3, ":", math.fabs(num3))
|
py | 1a48f791e86a8e5dc392feb8b9d524831c067b1d | """Tests for the MDWeb Base Objects."""
from pyfakefs import fake_filesystem_unittest, fake_filesystem
import unittest
from mdweb.BaseObjects import MetaInfParser
from mdweb.Exceptions import PageMetaInfFieldException
from mdweb.Navigation import Navigation
from mdweb.Page import Page, load_page
class TestNavigationBaseItem(fake_filesystem_unittest.TestCase):
"""MDSite Navigation Base tests."""
def setUp(self):
"""Create fake filesystem."""
self.setUpPyfakefs()
self.fake_os = fake_filesystem.FakeOsModule(self.fs)
def test_navigation_type(self):
"""A directory should have navigation type 'Navigation'."""
self.fs.create_file('/my/content/index.md')
nav = Navigation('/my/content')
self.assertEqual(nav.nav_type, "Navigation")
def test_page_type(self):
"""A file in a directory should have navigation type 'Page'."""
file_string = u""
self.fs.create_file('/my/content/index.md',
contents=file_string)
page = Page(*load_page('/my/content', '/my/content/index.md'))
self.assertEqual(page.nav_type, "Page")
class TestMetaInfParser(unittest.TestCase):
"""Index object tests."""
class MockMetaInf(MetaInfParser): # pylint: disable=R0903
"""MDWeb Navigation Meta Information."""
FIELD_TYPES = {
'nav_name': ('unicode', None),
'order': ('int', 0),
}
def test_blank_value(self):
"""A blank value in a meta-inf definition should raise exception."""
self.assertRaises(PageMetaInfFieldException,
self.MockMetaInf,
'''Nav Name: Documentation
Order: ''')
|
py | 1a48f9cf730144fd896148e32fe9e94ee51c86e5 | from .player import Player
from typing import Optional
class Game:
def __init__(self, p1: Player, p2: Player):
self._p1 = p1
self._p2 = p2
@property
def winner(self) -> Optional[Player]:
if self._p1.has_lost():
return self._p2
if self._p2.has_lost():
return self._p1
return None
def __str__(self) -> str:
return f"{self._p1} - {self._p2}"
def __eq__(self, other: "Game") -> bool:
return self._p1 == other._p1 and self._p2 == other._p2
def __hash__(self) -> int:
return hash(self._p1) ^ hash(self._p2)
@property
def p1(self) -> Player:
return self._p1
@property
def p2(self) -> Player:
return self._p2
def p1_won(self) -> bool:
return self.winner == self._p1
def single_round(self) -> "Game":
if self.winner is not None:
return self
card1 = self._p1.top()
card2 = self._p2.top()
if card1 > card2:
p1 = self._p1.win_round(card1, card2)
p2 = self._p2.lose_round()
else:
p1 = self._p1.lose_round()
p2 = self._p2.win_round(card2, card1)
return Game(p1, p2)
def play_game(self) -> "Game":
game = self
while game.winner is None:
game = game.single_round()
return game
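# Illustrative usage sketch (an addition, not part of the original module).
# It assumes Player can be constructed from the state the game needs (e.g. a
# deck of card values); that constructor lives in .player and is not shown
# here, so the calls below are hypothetical.
#
#     p1 = Player([9, 2, 6, 3, 1])   # hypothetical constructor call
#     p2 = Player([5, 8, 4, 7, 10])  # hypothetical constructor call
#     finished = Game(p1, p2).play_game()
#     print(finished.winner, finished.p1_won())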
|
py | 1a48f9fe1024ca5b68910751dee2743bc1abb0ff | from django.shortcuts import render
# Create your views here.
def page(request, num="1"):
pass
|
py | 1a48fa186d8b0a035ca2cf3f56ab48f662d474aa | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
from tensorboard.plugins import projector
from text_fast import TextFAST
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
OPTION = dh._option(pattern=0)
logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
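# Note (an addition, not part of the original script): `data` is the dict
# returned by dh.load_data_and_labels(...) below; this helper only relies on it
# holding aligned 'f_pad_seqs', 'b_pad_seqs' and 'onehot_labels' entries, and
# yields (front_sequence, behind_sequence, onehot_label) tuples that
# train_step()/validation_step() unpack with zip(*batch).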
def train_fasttext():
"""Training FASTTEXT model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load sentences, labels, and training parameters
logger.info("Loading data...")
logger.info("Data processing...")
train_data = dh.load_data_and_labels(args, args.train_file, word2idx)
val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)
# Build a graph and fasttext object
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
fasttext = TextFAST(
sequence_length=args.pad_seq_len,
vocab_size=len(word2idx),
embedding_type=args.embedding_type,
embedding_size=args.embedding_dim,
num_classes=args.num_classes,
l2_reg_lambda=args.l2_lambda,
pretrained_embedding=embedding_matrix)
# Define training procedure
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,
global_step=fasttext.global_step,
decay_steps=args.decay_steps,
decay_rate=args.decay_rate,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, vars = zip(*optimizer.compute_gradients(fasttext.loss))
grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)
train_op = optimizer.apply_gradients(zip(grads, vars), global_step=fasttext.global_step,
name="train_op")
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in zip(grads, vars):
if g is not None:
grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = dh.get_out_dir(OPTION, logger)
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints"))
# Summaries for loss
loss_summary = tf.summary.scalar("loss", fasttext.loss)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Validation summaries
validation_summary_op = tf.summary.merge([loss_summary])
validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)
best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)
if OPTION == 'R':
# Load fasttext model
logger.info("Loading model...")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
logger.info(checkpoint_file)
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
if OPTION == 'T':
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Embedding visualization config
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = "embedding"
embedding_conf.metadata_path = args.metadata_file
projector.visualize_embeddings(train_summary_writer, config)
projector.visualize_embeddings(validation_summary_writer, config)
# Save the embedding visualization
saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt"))
current_step = sess.run(fasttext.global_step)
def train_step(batch_data):
"""A single training step."""
x_f, x_b, y_onehot = zip(*batch_data)
feed_dict = {
fasttext.input_x_front: x_f,
fasttext.input_x_behind: x_b,
fasttext.input_y: y_onehot,
fasttext.dropout_keep_prob: args.dropout_rate,
fasttext.is_training: True
}
_, step, summaries, loss = sess.run(
[train_op, fasttext.global_step, train_summary_op, fasttext.loss], feed_dict)
logger.info("step {0}: loss {1:g}".format(step, loss))
train_summary_writer.add_summary(summaries, step)
def validation_step(val_loader, writer=None):
"""Evaluates model on a validation set."""
batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)
eval_counter, eval_loss = 0, 0.0
true_labels = []
predicted_scores = []
predicted_labels = []
for batch_validation in batches_validation:
x_f, x_b, y_onehot = zip(*batch_validation)
feed_dict = {
fasttext.input_x_front: x_f,
fasttext.input_x_behind: x_b,
fasttext.input_y: y_onehot,
fasttext.dropout_keep_prob: 1.0,
fasttext.is_training: False
}
step, summaries, predictions, cur_loss = sess.run(
[fasttext.global_step, validation_summary_op,
fasttext.topKPreds, fasttext.loss], feed_dict)
# Prepare for calculating metrics
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in predictions[0]:
predicted_scores.append(j[0])
for k in predictions[1]:
predicted_labels.append(k[0])
eval_loss = eval_loss + cur_loss
eval_counter = eval_counter + 1
if writer:
writer.add_summary(summaries, step)
eval_loss = float(eval_loss / eval_counter)
# Calculate Precision & Recall & F1
eval_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
eval_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
eval_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
eval_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
eval_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
return eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc
# Generate batches
batches_train = dh.batch_iter(list(create_input_data(train_data)), args.batch_size, args.epochs)
num_batches_per_epoch = int((len(train_data['f_pad_seqs']) - 1) / args.batch_size) + 1
# Training loop. For each batch...
for batch_train in batches_train:
train_step(batch_train)
current_step = tf.train.global_step(sess, fasttext.global_step)
if current_step % args.evaluate_steps == 0:
logger.info("\nEvaluation:")
eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc = \
validation_step(val_data, writer=validation_summary_writer)
logger.info("All Validation set: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc))
best_saver.handle(eval_acc, sess, current_step)
if current_step % args.checkpoint_steps == 0:
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logger.info("Saved model checkpoint to {0}\n".format(path))
if current_step % num_batches_per_epoch == 0:
current_epoch = current_step // num_batches_per_epoch
logger.info("Epoch {0} has finished!".format(current_epoch))
logger.info("All Done.")
if __name__ == '__main__':
train_fasttext()
|
py | 1a48fa5bc234bffc0d870c3344a36ca95139c559 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import shutil
import time
import warnings
import moxing as mox
import apex
import numpy as np
import torch.npu
from apex import amp
from collections import OrderedDict
import torch
import torch.onnx
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
from models import resnet_0_6_0
CALCULATE_DEVICE = "npu:0"
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', default='', type=str,
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--npu', default=None, type=int,
help='NPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--device', default='npu', type=str, help='npu or gpu')
parser.add_argument('--addr', default='10.136.181.115',
type=str, help='master addr')
parser.add_argument('--amp', default=False, action='store_true',
help='use amp to train the model')
parser.add_argument('--warm_up_epochs', default=0, type=int,
help='warm up')
parser.add_argument('--loss-scale', default=1024., type=float,
help='loss scale using in amp, default -1 means dynamic')
parser.add_argument('--opt-level', default='O2', type=str,
help='loss scale using in amp, default -1 means dynamic')
parser.add_argument('--prof', default=False, action='store_true',
help='use profiling to evaluate the performance of model')
parser.add_argument('--save_path', default='', type=str,
help='path to save models')
parser.add_argument('--num_classes', default=1000, type=int,
help='path to save models')
# modelarts modification
parser.add_argument('--train_url',
default='',
type=str,
help="setting dir of training output")
parser.add_argument('--data_url',
metavar='DIR',
default='',
help='path to dataset')
parser.add_argument('--model_url',
metavar='DIR',
default='',
help='path to pretrained model')
parser.add_argument('--onnx', default=True, action='store_true',
help="convert pth model to onnx")
cur_step = 0
CACHE_TRAINING_URL = "/cache/training/"
CACHE_DATA_URL = "/cache/data_url"
CACHE_MODEL_URL = "/cache/model"
best_acc1 = 0
def main():
args = parser.parse_args()
global CALCULATE_DEVICE
CALCULATE_DEVICE = "npu:{}".format(args.npu)
if 'npu' in CALCULATE_DEVICE:
torch.npu.set_device(CALCULATE_DEVICE)
if args.data_url:
import moxing as mox
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
###### modify npu_p1 1######
args.gpu = None
###### modify npu_p1 1 end ######
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
###### modify 8 ######
if args.device == 'npu':
dist.init_process_group(backend=args.dist_backend, # init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
else:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
###### modify 8 end ######
# create model
if args.pretrained:
print("=> using pre-trained model wide_resnet101_2")
model = resnet_0_6_0.wide_resnet101_2()
print("loading model of yours...")
model_path = "./checkpoint.pth.tar"
if args.model_url:
real_path = CACHE_MODEL_URL
if not os.path.exists(real_path):
os.makedirs(real_path)
mox.file.copy_parallel(args.model_url, real_path)
print("training data finish copy to %s." % real_path)
model_path = os.path.join(CACHE_MODEL_URL, 'checkpoint.pth.tar')
pretrained_dict = torch.load(model_path, map_location="cpu")["state_dict"]
model.load_state_dict({k.replace('module.', ''): v for k, v in pretrained_dict.items()})
if "fc.weight" in pretrained_dict:
pretrained_dict.pop('fc.weight')
pretrained_dict.pop('fc.bias')
for param in model.parameters():
param.requires_grad = False
model.fc = nn.Linear(2048, args.num_classes)
#model.load_state_dict(pretrained_dict, strict=False)
else:
print("=> creating model wide_resnet101_2")
model = resnet_0_6_0.wide_resnet101_2()
# if not torch.cuda.is_available():
# print('using CPU, this will be slow')
# elif args.distributed:
###### modify npu_p1 2######
if args.distributed:
###### modify npu_p1 2 end ######
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
# model = torch.nn.DataParallel(model).cuda()
###### modify npu_p1 3######
model = model.to(CALCULATE_DEVICE)
###### modify npu_p1 3 end ######
# define loss function (criterion) and optimizer
# criterion = nn.CrossEntropyLoss().cuda(args.gpu)
############## npu modify 4 begin #############
    # Move the loss function to the NPU for computation.
criterion = nn.CrossEntropyLoss().to(CALCULATE_DEVICE)
############## npu modify 4 end #############
optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), args.lr,
momentum=args.momentum,
nesterov=True,
weight_decay=args.weight_decay)
###### modify 1 ######
if args.amp:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale)
###### modify 1 end ######
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
if args.data_url:
real_path = CACHE_DATA_URL
if not os.path.exists(real_path):
os.makedirs(real_path)
mox.file.copy_parallel(args.data_url, real_path)
print("training data finish copy to %s." % real_path)
args.data = real_path
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
###### modify 7 ######
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(
train_sampler is None),
num_workers=args.workers, pin_memory=False, sampler=train_sampler, drop_last=True)
###### modify 7 end #######
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
###### modify 3 ######
if args.prof:
profiling(train_loader, model, criterion, optimizer, args)
return
###### modify 3 end ######
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
if args.train_url:
mox.file.copy_parallel(CACHE_TRAINING_URL, args.train_url)
def proc_node_module(checkpoint, AttrName):
new_state_dict = OrderedDict()
for k, v in checkpoint[AttrName].items():
if(k[0:7] == "module."):
name = k[7:]
else:
name = k[0:]
new_state_dict[name] = v
return new_state_dict
def convert(model_path, onnx_save, num_class):
checkpoint = torch.load(model_path, map_location='cpu')
checkpoint['state_dict'] = proc_node_module(checkpoint, 'state_dict')
model = resnet_0_6_0.wide_resnet101_2(num_classes=num_class)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
input_names = ["actual_input_1"]
output_names = ["output1"]
dummy_input = torch.randn(1, 3, 224, 224)
if len(onnx_save) > 0:
save_path = os.path.join(onnx_save, "wide_resnet101_2_npu_16.onnx")
else:
save_path = "wide_resnet101_2_npu_16.onnx"
print(save_path)
    torch.onnx.export(model, dummy_input, save_path,
                      input_names=input_names, output_names=output_names,
                      opset_version=11)
def profiling(data_loader, model, criterion, optimizer, args):
# switch to train mode
model.train()
    def update(model, images, target, optimizer):
        # Reset gradients before the backward pass so that step() applies the
        # gradients computed for this batch.
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, target)
        if args.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
for step, (images, target) in enumerate(data_loader):
if args.device == 'npu':
# loc = 'npu:{}'.format(args.gpu)
loc = CALCULATE_DEVICE
images = images.to(loc, non_blocking=True).to(torch.float)
target = target.to(torch.int32).to(loc, non_blocking=True)
else:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
if step < 5:
update(model, images, target, optimizer)
else:
if args.device == 'npu':
with torch.autograd.profiler.profile(use_npu=True) as prof:
update(model, images, target, optimizer)
else:
with torch.autograd.profiler.profile(use_cuda=True) as prof:
update(model, images, target, optimizer)
break
prof.export_chrome_trace("output.prof")
def train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
# if torch.cuda.is_available():
# target = target.cuda(args.gpu, non_blocking=True)
############## npu modify 5 begin #############
        # Move the batch to the NPU for computation and convert the target dtype.
if 'npu' in CALCULATE_DEVICE:
target = target.to(torch.int32)
images, target = images.to(CALCULATE_DEVICE, non_blocking=True), target.to(CALCULATE_DEVICE, non_blocking=True)
############## npu modify 5 end #############
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
###### modify 2 ######
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
###### modify 2 end ######
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
###### modify 4 ######
if i % args.print_freq == 0:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
progress.display(i)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
# print("[npu id:", args.gpu, "]", "batch_size:", ngpus_per_node * args.batch_size,
# 'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(
# args.batch_size / batch_time.avg))
if batch_time.avg:
print("[npu id:", CALCULATE_DEVICE, "]", "batch_size:", args.world_size * args.batch_size,
'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(
args.batch_size * args.world_size / batch_time.avg))
###### modify 4 end ######
def validate(val_loader, model, criterion, args):
###### modify 5 ######
batch_time = AverageMeter('Time', ':6.3f', start_count_index= 5)
###### modify 5 end ######
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
            if args.device == 'npu':
                loc = CALCULATE_DEVICE
                images = images.to(loc).to(torch.float)
                target = target.to(torch.int32).to(loc, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
args = parser.parse_args()
if args.train_url:
os.makedirs(CACHE_TRAINING_URL, 0o755, exist_ok=True)
filename = os.path.join(CACHE_TRAINING_URL, filename)
torch.save(state, filename)
convert(filename, CACHE_TRAINING_URL, args.num_classes)
path_best = os.path.join(CACHE_TRAINING_URL, 'model_best.pth.tar')
if is_best:
shutil.copyfile(filename, path_best)
else:
filename = os.path.join(args.save_path, filename)
torch.save(state, filename)
path_best = os.path.join(args.save_path, 'model_best.pth.tar')
if is_best:
shutil.copyfile(filename, path_best)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', start_count_index=2):
self.name = name
self.fmt = fmt
self.reset()
self.start_count_index = start_count_index
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if self.count == 0:
self.N = n
self.val = val
self.count += n
if self.count > (self.start_count_index * self.N):
self.sum += val * n
self.avg = self.sum / (self.count - self.start_count_index * self.N)
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by cosine method"""
if args.warm_up_epochs > 0 and epoch < args.warm_up_epochs:
lr = args.lr * ((epoch + 1) / (args.warm_up_epochs + 1))
else:
alpha = 0
cosine_decay = 0.5 * (
1 + np.cos(np.pi * (epoch - args.warm_up_epochs) / (args.epochs - args.warm_up_epochs)))
decayed = (1 - alpha) * cosine_decay + alpha
lr = args.lr * decayed
print("=> Epoch[%d] Setting lr: %.4f" % (epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
############## npu modify 6 begin #############
    main()
    ############## npu modify 6 end #############
|
py | 1a48fa9b12d7c4f8a49fa4df8e27712f1f63a4aa | from django import template
register = template.Library()
@register.filter(name='key')
def get_key(d, key):
return d.get(key)
@register.filter(name='get_name')
def get_full_name(d, key):
return d.get(key).last_name + ' ' + d.get(key).first_name
@register.filter(name='get_url')
def get_url(d, key):
return d.get(key).get_absolute_url()
@register.filter(name='get_group_name')
def get_group_name(d):
    return list(d.keys())[0].name
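# Illustrative template usage (the context variables below are hypothetical;
# the dict is assumed to map keys to user-like objects):
#   {{ members|key:user_id }}
#   {{ members|get_name:user_id }}
#   <a href="{{ members|get_url:user_id }}">profile</a>
#   {{ groups|get_group_name }}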
|
py | 1a48faecf1e66989cffe75295a483f5c0059b52c | # Copyright 2017 the pycolab Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defeat marauders from somewhere exterior to this planet.
Keys: left, right - move. space - fire. q - quit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import numpy as np
import sys
from pycolab import ascii_art
from pycolab import human_ui
from pycolab import rendering
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
# Not shown in this ASCII art diagram are the Sprites we use for laser blasts,
# which control the characters listed in UPWARD_BOLT_CHARS and
# DOWNWARD_BOLT_CHARS below.
GAME_ART = [' X X X X X X X X ', # Row 0
' X X X X X X X X ',
' X X X X X X X X ',
' X X X X X X X X ',
' X X X X X X X X ',
' ', # Row 5
' ',
' ',
' ',
' ',
' ', # Row 10. If a Marauder
' BBBB BBBB BBBB BBBB ', # makes it to row 10,
' BBBB BBBB BBBB BBBB ', # the game is over.
' BBBB BBBB BBBB BBBB ',
' ',
' P ']
# Characters listed in UPWARD_BOLT_CHARS are used for Sprites that represent
# laser bolts that the player shoots toward Marauders. Add more characters if
# you want to be able to have more than two of these bolts in the "air" at once.
UPWARD_BOLT_CHARS = 'abcd'
# Characters listed in DOWNWARD_BOLT_CHARS are used for Sprites that represent
# laser bolts that Marauders shoot toward the player. Add more characters if you
# want more shooting from the Marauders.
DOWNWARD_BOLT_CHARS = 'yz'
# Shorthand for various points in the program:
_ALL_BOLT_CHARS = UPWARD_BOLT_CHARS + DOWNWARD_BOLT_CHARS
# To make life a bit easier for the player (and avoid the need for frame
# stacking), we use different characters to indicate the directions that the
# bolts go. If you'd like to make this game harder, you might try mapping both
# kinds of bolts to the same character.
LASER_REPAINT_MAPPING = dict(
[(b, '^') for b in UPWARD_BOLT_CHARS] +
[(b, '|') for b in DOWNWARD_BOLT_CHARS])
# These colours are only for humans to see in the CursesUi.
COLOURS_FG = {' ': (0, 0, 0), # Space, inky blackness of.
'X': (999, 999, 999), # The Marauders.
'B': (400, 50, 30), # The bunkers.
'P': (0, 999, 0), # The player.
'^': (0, 999, 999), # Bolts from player to aliens.
'|': (0, 999, 999)} # Bolts from aliens to player.
COLOURS_BG = {'^': (0, 0, 0), # Bolts from player to aliens.
'|': (0, 0, 0)} # Bolts from aliens to player.
#def _init_ ?
def make_game():
"""Builds and returns an Extraterrestrial Marauders game."""
return ascii_art.ascii_art_to_game(
GAME_ART, what_lies_beneath=' ',
sprites=dict(
[('P', PlayerSprite)] +
[(c, UpwardLaserBoltSprite) for c in UPWARD_BOLT_CHARS] +
[(c, DownwardLaserBoltSprite) for c in DOWNWARD_BOLT_CHARS]),
drapes=dict(X=MarauderDrape,
B=BunkerDrape),
update_schedule=['P', 'B', 'X'] + list(_ALL_BOLT_CHARS),
nb_action=5)
class BunkerDrape(plab_things.Drape):
"""A `Drape` for the bunkers at the bottom of the screen.
Bunkers are gradually eroded by laser bolts, for which the user loses one
point. Other than that, they don't really do much. If a laser bolt hits a
bunker, this Drape leaves a note about it in the Plot---the bolt's Sprite
checks this and removes itself from the board if it's present.
"""
def update(self, actions, board, layers, backdrop, things, the_plot):
# Where are the laser bolts? Bolts from players or marauders do damage.
bolts = np.logical_or.reduce([layers[c] for c in _ALL_BOLT_CHARS], axis=0)
hits = bolts & self.curtain # Any hits to a bunker?
np.logical_xor(self.curtain, hits, self.curtain) # If so, erode the bunker...
the_plot.add_reward(-np.sum(hits)) # ...and impose a penalty.
# Save the identities of bunker-striking bolts in the Plot.
the_plot['bunker_hitters'] = [chr(c) for c in board[hits]]
class MarauderDrape(plab_things.Drape):
"""A `Drape` for the marauders descending downward toward the player.
The Marauders all move in lockstep, which makes them an ideal application of
a Drape. Bits of the Drape get eroded by laser bolts from the player; each
hit earns ten points. If the Drape goes completely empty, or if any Marauder
makes it down to row 10, the game terminates.
As with `BunkerDrape`, if a laser bolt hits a Marauder, this Drape leaves a
note about it in the Plot; the bolt's Sprite checks this and removes itself
from the board if present.
"""
def __init__(self, curtain, character):
# The constructor just sets the Marauder's initial horizontal direction.
super(MarauderDrape, self).__init__(curtain, character)
self._dx = -1
def update(self, actions, board, layers, backdrop, things, the_plot):
# Where are the laser bolts? Only bolts from the player kill a Marauder.
bolts = np.logical_or.reduce([layers[c] for c in UPWARD_BOLT_CHARS], axis=0)
hits = bolts & self.curtain # Any hits to Marauders?
np.logical_xor(self.curtain, hits, self.curtain) # If so, zap the marauder...
the_plot.add_reward(np.sum(hits)*10) # ...and supply a reward.
# Save the identities of marauder-striking bolts in the Plot.
the_plot['marauder_hitters'] = [chr(c) for c in board[hits]]
# If no Marauders are left, or if any are sitting on row 10, end the game.
if (not self.curtain.any()) or self.curtain[10, :].any():
return the_plot.terminate_episode() # i.e. return None.
# We move faster if there are fewer Marauders. The odd divisor causes speed
# jumps to align on the high sides of multiples of 8; so, speed increases as
# the number of Marauders decreases to 32 (or 24 etc.), not 31 (or 23 etc.).
if the_plot.frame % max(1, np.sum(self.curtain)//8.0000001): return
# If any Marauder reaches either side of the screen, reverse horizontal
# motion and advance vertically one row.
if np.any(self.curtain[:, 0] | self.curtain[:, -1]):
self._dx = -self._dx
self.curtain[:] = np.roll(self.curtain, shift=1, axis=0)
self.curtain[:] = np.roll(self.curtain, shift=self._dx, axis=1)
class PlayerSprite(prefab_sprites.MazeWalker):
"""A `Sprite` for our player.
This `Sprite` simply ties actions to going left and right. In interactive
settings, the user can also quit.
"""
def __init__(self, corner, position, character):
"""Simply indicates to the superclass that we can't walk off the board."""
super(PlayerSprite, self).__init__(
corner, position, character, impassable='', confined_to_board=True)
def update(self, actions, board, layers, backdrop, things, the_plot):
del layers, backdrop, things # Unused.
if actions == 0: # go leftward?
self._west(board, the_plot)
elif actions == 1: # go rightward?
self._east(board, the_plot)
elif actions == 4: # quit?
the_plot.terminate_episode()
class UpwardLaserBoltSprite(prefab_sprites.MazeWalker):
"""Laser bolts shot from the player toward Marauders."""
def __init__(self, corner, position, character):
"""Starts the Sprite in a hidden position off of the board."""
super(UpwardLaserBoltSprite, self).__init__(
corner, position, character, impassable='')
self._teleport((-1, -1))
def update(self, actions, board, layers, backdrop, things, the_plot):
if self.visible:
self._fly(board, layers, things, the_plot)
elif actions == 2:
self._fire(layers, things, the_plot)
def _fly(self, board, layers, things, the_plot):
"""Handles the behaviour of visible bolts flying toward Marauders."""
# Disappear if we've hit a Marauder or a bunker.
if (self.character in the_plot['bunker_hitters'] or
self.character in the_plot['marauder_hitters']):
return self._teleport((-1, -1))
# Otherwise, northward!
self._north(board, the_plot)
def _fire(self, layers, things, the_plot):
"""Launches a new bolt from the player."""
# We don't fire if the player fired another bolt just now.
if the_plot.get('last_player_shot') == the_plot.frame: return
the_plot['last_player_shot'] = the_plot.frame
# We start just above the player.
row, col = things['P'].position
self._teleport((row-1, col))
class DownwardLaserBoltSprite(prefab_sprites.MazeWalker):
"""Laser bolts shot from Marauders toward the player."""
def __init__(self, corner, position, character):
"""Starts the Sprite in a hidden position off of the board."""
super(DownwardLaserBoltSprite, self).__init__(
corner, position, character, impassable='')
self._teleport((-1, -1))
def update(self, actions, board, layers, backdrop, things, the_plot):
if self.visible:
self._fly(board, layers, things, the_plot)
else:
self._fire(layers, the_plot)
def _fly(self, board, layers, things, the_plot):
"""Handles the behaviour of visible bolts flying toward the player."""
# Disappear if we've hit a bunker.
if self.character in the_plot['bunker_hitters']:
return self._teleport((-1, -1))
# End the game if we've hit the player.
if self.position == things['P'].position: the_plot.terminate_episode()
self._south(board, the_plot)
def _fire(self, layers, the_plot):
"""Launches a new bolt from a random Marauder."""
# We don't fire if another Marauder fired a bolt just now.
if the_plot.get('last_marauder_shot') == the_plot.frame: return
the_plot['last_marauder_shot'] = the_plot.frame
# Which Marauder should fire the laser bolt?
col = np.random.choice(np.nonzero(layers['X'].sum(axis=0))[0])
row = np.nonzero(layers['X'][:, col])[0][-1] + 1
# Move ourselves just below that Marauder.
self._teleport((row, col))
def main(argv=()):
del argv # Unused.
# Build an Extraterrestrial Marauders game.
game = make_game()
# Build an ObservationCharacterRepainter that will make laser bolts of the
# same type all look identical.
repainter = rendering.ObservationCharacterRepainter(LASER_REPAINT_MAPPING)
# Make a CursesUi to play it with.
ui = human_ui.CursesUi(
keys_to_actions={curses.KEY_LEFT: 0, curses.KEY_RIGHT: 1,
' ': 2, # shoot
-1: 3, # no-op
'q': 4}, # quit
repainter=repainter, delay=300,
colour_fg=COLOURS_FG, colour_bg=COLOURS_BG)
# Let the game begin!
ui.play(game)
if __name__ == '__main__':
main(sys.argv)
|
py | 1a48fc6ccd2c3ac2ca2e2ccaf7bdb7f28bd12755 | from typing import Tuple, Optional, Union
from torch import Tensor
from torch_sparse import SparseTensor
Adj = Union[Tensor, SparseTensor]
OptTensor = Optional[Tensor]
PairTensor = Tuple[Tensor, Tensor]
OptPairTensor = Tuple[Tensor, Optional[Tensor]]
PairOptTensor = Tuple[Optional[Tensor], Optional[Tensor]]
Size = Optional[Tuple[int, int]]
NoneType = Optional[Tensor]
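# Usage sketch (illustrative only, not part of the upstream module): the
# aliases above are meant for annotating message-passing style signatures.
def _example_signature(edge_index: Adj, x: OptTensor = None,
                       size: Size = None) -> PairOptTensor:
    """Hypothetical helper showing how the type aliases compose."""
    return x, None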
|
py | 1a48fce753c2fe2aef70504ffbd01e0aa39d29a2 | """Utility for retrieving the docstring of a dataclass's attributes
@author: Fabrice Normandin
"""
import inspect
import typing
from argparse import ArgumentTypeError
from dataclasses import dataclass
from typing import *
from logging import getLogger
logger = getLogger(__name__)
@dataclass
class AttributeDocString:
"""Simple dataclass for holding the comments of a given field."""
comment_above: str = ""
comment_inline: str = ""
docstring_below: str = ""
def get_attribute_docstring(
some_dataclass: Type, field_name: str
) -> AttributeDocString:
"""Returns the docstrings of a dataclass field.
NOTE: a docstring can either be:
- An inline comment, starting with <#>
- A Comment on the preceding line, starting with <#>
- A docstring on the following line, starting with either <\"\"\"> or <'''>
Arguments:
some_dataclass {type} -- a dataclass
field_name {str} -- the name of the field.
Returns:
AttributeDocString -- an object holding the three possible comments
"""
try:
source = inspect.getsource(some_dataclass)
except TypeError as e:
logger.debug(f"Couldn't find the attribute docstring: {e}")
return AttributeDocString()
code_lines: List[str] = source.splitlines()
# the first line is the class definition, we skip it.
start_line_index = 1
# starting at the second line, there might be the docstring for the class.
# We want to skip over that until we reach an attribute definition.
while start_line_index < len(code_lines):
if _contains_attribute_definition(code_lines[start_line_index]):
break
start_line_index += 1
lines_with_attribute_defs = [
(index, line)
for index, line in enumerate(code_lines)
if _contains_attribute_definition(line)
]
for i, line in lines_with_attribute_defs:
parts: List[str] = line.split(":", maxsplit=1)
if parts[0].strip() == field_name:
# we found the line with the definition of this field.
comment_above = _get_comment_ending_at_line(code_lines, i - 1)
comment_inline = _get_inline_comment_at_line(code_lines, i)
docstring_below = _get_docstring_starting_at_line(code_lines, i + 1)
complete_docstring = AttributeDocString(
comment_above, comment_inline, docstring_below
)
return complete_docstring
# we didn't find the attribute.
mro = inspect.getmro(some_dataclass)
if len(mro) == 1:
raise RuntimeWarning(
f"Couldn't find the given attribute name {field_name}' within the "
"given class."
)
base_class = mro[1]
try:
return get_attribute_docstring(base_class, field_name)
except OSError as e:
logger.warning(UserWarning(f"Couldn't find the docstring: {e}"))
return AttributeDocString()
def _contains_attribute_definition(line_str: str) -> bool:
"""Returns wether or not a line contains a an dataclass field definition.
Arguments:
line_str {str} -- the line content
Returns:
bool -- True if there is an attribute definition in the line.
"""
parts = line_str.split("#", maxsplit=1)
before_comment = parts[0].strip()
before_first_equal = before_comment.split("=", maxsplit=1)[0]
parts = before_first_equal.split(":")
if len(parts) != 2:
# For now, I don't think it's possible to have a type annotation contain :
return False
attr_name = parts[0]
attr_type = parts[1]
return not attr_name.isspace() and not attr_type.isspace()
def _is_empty(line_str: str) -> bool:
return line_str.strip() == ""
def _is_comment(line_str: str) -> bool:
return line_str.strip().startswith("#")
def _get_comment_at_line(code_lines: List[str], line: int) -> str:
"""Gets the comment at line `line` in `code_lines`.
Arguments:
line {int} -- the index of the line in code_lines
Returns:
str -- the comment at the given line. empty string if not present.
"""
line_str = code_lines[line]
assert not _contains_attribute_definition(line_str)
if "#" not in line_str:
return ""
parts = line_str.split("#", maxsplit=1)
comment = parts[1].strip()
return comment
def _get_inline_comment_at_line(code_lines: List[str], line: int) -> str:
"""Gets the inline comment at line `line`.
Arguments:
line {int} -- the index of the line in code_lines
Returns:
str -- the inline comment at the given line, else an empty string.
"""
assert 0 <= line < len(code_lines)
assert _contains_attribute_definition(code_lines[line])
line_str = code_lines[line]
parts = line_str.split("#", maxsplit=1)
if len(parts) != 2:
return ""
comment = parts[1].strip()
return comment
def _get_comment_ending_at_line(code_lines: List[str], line: int) -> str:
result = ""
start_line = line
end_line = line
# print(f"Get comment ending at line {line}")
# for i, l in enumerate(code_lines):
# print(f"line {i}: {l}")
# move up the code, one line at a time, while we don't hit the start,
# an attribute definition, or the end of a docstring.
while start_line > 0:
line_str = code_lines[start_line]
if _contains_attribute_definition(line_str):
break # previous line is an assignment
if '"""' in line_str or "'''" in line_str:
break # previous line has a docstring
start_line -= 1
start_line += 1
lines = []
for i in range(start_line, end_line + 1):
# print(f"line {i}: {code_lines[i]}")
if _is_empty(code_lines[i]):
continue
assert not _contains_attribute_definition(code_lines[i])
comment = _get_comment_at_line(code_lines, i)
lines.append(comment)
return "\n".join(lines)
def _get_docstring_starting_at_line(code_lines: List[str], line: int) -> str:
first_line = line
i = line
end_line: int
token: Optional[str] = None
triple_single = "'''"
triple_double = '"""'
# print("finding docstring starting from line", line)
# if we are looking further down than the end of the code, there is no
# docstring.
if line >= len(code_lines):
return ""
# the list of lines making up the docstring.
docstring_contents: List[str] = []
    while i < len(code_lines):
line_str = code_lines[i]
# print(f"(docstring) line {line}: {line_str}")
# we haven't identified the starting line yet.
if token is None:
if _is_empty(line_str):
i += 1
continue
elif _contains_attribute_definition(line_str) or _is_comment(line_str):
# we haven't reached the start of a docstring yet (since token
# is None), and we reached a line with an attribute definition,
# or a comment, hence the docstring is empty.
return ""
elif triple_single in line_str and triple_double in line_str:
# This handles something stupid like:
# @dataclass
# class Bob:
# a: int
# """ hello '''
# bob
# ''' bye
# """
triple_single_index = line_str.index(triple_single)
triple_double_index = line_str.index(triple_double)
if triple_single_index < triple_double_index:
token = triple_single
else:
token = triple_double
elif triple_double in line_str:
token = triple_double
elif triple_single in line_str:
token = triple_single
else:
# for i, line in enumerate(code_lines):
# print(f"line {i}: <{line}>")
# print(f"token: <{token}>")
# print(line_str)
logger.debug(
f"Warning: Unable to parse attribute docstring: {line_str}"
)
return ""
# get the string portion of the line (after a token or possibly
# between two tokens).
parts = line_str.split(token, maxsplit=2)
if len(parts) == 3:
# This takes care of cases like:
# @dataclass
# class Bob:
# a: int
# """ hello """
between_tokens = parts[1].strip()
# print("Between tokens:", between_tokens)
docstring_contents.append(between_tokens)
break
elif len(parts) == 2:
after_token = parts[1].strip()
# print("After token:", after_token)
docstring_contents.append(after_token)
else:
# print(f"token is <{token}>")
if token in line_str:
# print(f"Line {line} End of a docstring:", line_str)
before = line_str.split(token, maxsplit=1)[0]
docstring_contents.append(before.strip())
break
else:
# intermediate line without the token.
docstring_contents.append(line_str.strip())
i += 1
# print("Docstring contents:", docstring_contents)
return "\n".join(docstring_contents)
|
py | 1a48fd58359c3f2e4e6330c8811589b2274387aa | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=38
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.rx(2.708052867394402).on(input_qubit[1])) # number=11
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.Y.on(input_qubit[2])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=35
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=36
c.append(cirq.H.on(input_qubit[0])) # number=37
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.Z.on(input_qubit[1])) # number=20
c.append(cirq.Z.on(input_qubit[3])) # number=31
c.append(cirq.H.on(input_qubit[0])) # number=22
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=23
c.append(cirq.H.on(input_qubit[0])) # number=24
c.append(cirq.Z.on(input_qubit[2])) # number=15
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=29
c.append(cirq.H.on(input_qubit[3])) # number=30
c.append(cirq.Z.on(input_qubit[3])) # number=10
c.append(cirq.H.on(input_qubit[0])) # number=32
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=33
c.append(cirq.H.on(input_qubit[0])) # number=34
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2905.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
py | 1a48fd59c73770700874a7768a26fa922bd5cbda | from openpyxl import Workbook
from openpyxl.styles import PatternFill
from openpyxl.formatting.rule import FormulaRule
wb = Workbook()
ws = wb.active
ws.cell(2, 1).value = 'not blank'
ws.cell(4, 1).value = 5.3529
orange_fill = PatternFill('solid', start_color='FFA500', end_color='FFA500')
is_blank_rule = FormulaRule(  # configured using an Excel formula
formula=['ISBLANK(INDIRECT(ADDRESS(ROW(), COLUMN())))'],
stopIfTrue=True,
fill=orange_fill
)
ws.conditional_formatting.add('A1:A5', is_blank_rule)
ws.title = 'Classic (formula)'
wb.save('classic_formula.xlsx')
|
py | 1a48fef376a2e1dda6b70394dbe1806433c9e6c7 | import logging
import time
from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
from . import defaults
from .connection import get_redis_from_settings
from .bucket_hash import BucketHash
logger = logging.getLogger(__name__)
# TODO: Rename class to RedisDupeFilter.
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplicates filter.
This class can also be used with default Scrapy's scheduler.
"""
logger = logger
def __init__(self, server, key, debug=False, bucket_size=100):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
            Redis key where fingerprints are stored.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.exist_bucket_key = self.key + "_exist_bucket"
self.debug = debug
self.logdupes = True
self.bucket = BucketHash(bucket_size)
@classmethod
def from_settings(cls, settings):
"""Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
"""
server = get_redis_from_settings(settings)
# XXX: This creates one-time key. needed to support to use this
# class as standalone dupefilter with scrapy's default scheduler
# if scrapy passes spider on open() method this wouldn't be needed
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
bucket_size = settings.getint('SCHEDULER_QUEUE_BUCKET_SIZE')
return cls(server, key=key, debug=debug, bucket_size=bucket_size)
@classmethod
def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings)
def request_seen(self, request):
"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
fp = self.request_fingerprint(request)
# This returns the number of values added, zero if already exists.
logger.debug("[request_seen] fp is {} start".format(fp))
bucket = str(self.bucket.mapping(fp))
bucket_key = self.key + "_" + bucket
added = self.server.sadd(bucket_key, fp)
self.server.sadd(self.exist_bucket_key, bucket)
        # These per-bucket keys need to be cleaned up when the crawl finishes.
logger.debug("[request_seen] fp is {} end".format(fp))
return added == 0
def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request)
@classmethod
def from_spider(cls, spider):
settings = spider.settings
server = get_redis_from_settings(settings)
dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
key = dupefilter_key % {'spider': spider.name}
debug = settings.getbool('DUPEFILTER_DEBUG')
bucket_size = settings.getint('SCHEDULER_QUEUE_BUCKET_SIZE')
logger.info("dupefiler bucket size is {}".format(bucket_size))
return cls(server, key=key, debug=debug, bucket_size=bucket_size)
def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear()
def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key)
def log(self, request, spider):
"""Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
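# Sketch of a bucket-aware cleanup (an assumption, not upstream scrapy-redis
# API): `RFPDupeFilter.clear` only deletes `self.key`, while the fingerprints
# live in per-bucket keys (`<key>_<bucket>`) tracked by the exist-bucket set,
# so a helper like this could be called when the crawl finishes.
def clear_bucketed_fingerprints(server, key, exist_bucket_key):
    """Delete every per-bucket fingerprint set plus the bucket index set."""
    for bucket in server.smembers(exist_bucket_key):
        if isinstance(bucket, bytes):
            bucket = bucket.decode("utf-8")
        server.delete("{}_{}".format(key, bucket))
    server.delete(exist_bucket_key)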
|
py | 1a48ff01cabe164e72d8f1b64353d48a4be49d51 | template_open = '{{#ctx.payload.aggregations.result.hits.hits.0._source}}'
template_close = template_open.replace('{{#','{{/')
kibana_url = (
"{{ctx.metadata.kibana_url}}/app/kibana#/discover?"
"_a=(columns:!(_source),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,"
"index:'metricbeat-*',key:query,negate:!f,type:custom,value:''),"
"query:(bool:(must:!((regexp:(kubernetes.pod.name:'{{ctx.metadata.regex}}')),"
"(match:(metricset.name:'state_pod')),"
"(match:(kubernetes.namespace:{{ctx.metadata.namespace}}))))))),"
"index:'metricbeat-*',"
"interval:auto,query:(language:lucene,query:''),"
"regexp:(language:lucene,query:'kubernetes.pod.name:test-nginx-%5B%5E-%5D%20-%5B%5E-%5D%20'),"
"sort:!('@timestamp',desc),time:(from:now%2FM,mode:quick,to:now%2FM))"
"&_g=(refreshInterval:(display:Off,pause:!f,value:0),"
"time:(from:now-15m,mode:quick,to:now))"
)
watch_url = "{{ctx.metadata.kibana_url}}/app/management/insightsAndAlerting/watcher/watches/watch/{{ctx.metadata.name}}/status"
slack_alert_template = "{template_open}*<{kibana_url}|{{{{ctx.metadata.name}}}}>* has `{{{{ctx.payload.aggregations.pods.value}}}}` not ready pod(s) <{watch_url}|[ack]>{{{{#ctx.metadata.docs}}}} <{{{{.}}}}|[docs]>{{{{/ctx.metadata.docs}}}}{template_close}".format(**locals())
email_alert_template = "{template_open}<a href=\"{kibana_url}\">{{{{ctx.metadata.name}}}}</a> has {{{{ctx.payload.aggregations.pods.value}}}} not ready pod(s) <a href=\"{watch_url}\">[ack]</a>{{{{#ctx.metadata.docs}}}} <a href=\"{{{{.}}}}\">[docs]</a>{{{{/ctx.metadata.docs}}}}{template_close}".format(**locals())
k8s_template = {
"metadata": {
"name": "",
"namespace": "",
"regex": "",
"kibana_url": "",
"kibana_dashboard": "",
"docs": "",
"xpack" : {
"type" : "json"
},
},
"trigger": {
"schedule": {
"interval": ""
}
},
"input": {
"search": {
"request": {
"search_type": "query_then_fetch",
"indices": [
"metricbeat-*"
],
"rest_total_hits_as_int": True,
"body": {
"aggs": {
"result": {
"top_hits": {
"size": 1
}
},
"pods": {
"cardinality": {
"field": "kubernetes.pod.name"
}
},
"not_ready": {
"terms": {
"field": "kubernetes.pod.name",
"min_doc_count": 12,
"size": 100
}
}
},
"query": {
"bool": {
"must_not": [],
"must": [],
"filter": [
{
"range": {
"@timestamp": {
"gte": "now-{{ctx.metadata.window}}"
}
}
}
]
}
}
}
}
}
},
"condition": {},
"actions": {
"email_admin": {
"throttle_period_in_millis": 300000,
"email": {
"profile": "standard",
"subject": "{{#ctx.payload.aggregations.result.hits.hits.0._source}}{{ctx.metadata.name}} has {{ctx.payload.aggregations.pods.value}} not ready pod(s){{/ctx.payload.aggregations.result.hits.hits.0._source}}",
"body": {
"html": email_alert_template
}
}
},
"notify-slack": {
"throttle_period_in_millis": 300000,
"slack": {
"message": {
"text": slack_alert_template
}
}
}
}
}
metricbeat_template = {
"metadata": {
"window": "300s",
"subject": "No metricbeat data has been recieved in the last 5 minutes!"
},
"trigger": {
"schedule": {
"interval": "60s"
}
},
"input": {
"search": {
"request": {
"search_type": "query_then_fetch",
"indices": [
"metricbeat-*"
],
"rest_total_hits_as_int": True,
"body": {
"query": {
"bool": {
"must": [
{
"match": {
"metricset.name": "state_pod"
}
}
],
"filter": [
{
"range": {
"@timestamp": {
"gte": "now-{{ctx.metadata.window}}"
}
}
}
]
}
}
}
}
}
},
"condition": {
"compare": {
"ctx.payload.hits.total": {
"eq": 0
}
}
},
"actions": {
"email_admin": {
"throttle_period_in_millis": 300000,
"email": {
"profile": "standard",
"subject": "{{ctx.metadata.subject}}",
"body": {
"text": "{{ctx.metadata.message}}"
}
}
},
"notify-slack": {
"throttle_period_in_millis": 300000,
"slack": {
"message": {
"text": "{{ctx.metadata.message}}"
}
}
}
}
}
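# Minimal upload sketch (assumptions: an Elasticsearch cluster with Watcher
# enabled and the `requests` package available; `es_url`, `watch_id` and
# `auth` are placeholders supplied by the caller) showing how one of the
# templates above could be registered through the Watcher REST API.
def put_watch_example(es_url, watch_id, watch_body, auth=None):
    import requests
    response = requests.put(
        "{}/_watcher/watch/{}".format(es_url.rstrip("/"), watch_id),
        json=watch_body,
        auth=auth,
    )
    response.raise_for_status()
    return response.json()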
|
py | 1a48ff14246b152706019b6913f694c92518955c | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?pandalive\.co\.kr/"
))
class Pandalive(Plugin):
_room_id_re = re.compile(r"roomid\s*=\s*String\.fromCharCode\((.*)\)")
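    # The page embeds the room id as `String.fromCharCode(<comma separated
    # char codes>)`; the captured codes are decoded back into a string inside
    # the validation schema below.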
def _get_streams(self):
media_code = self.session.http.get(self.url, schema=validate.Schema(
validate.parse_html(),
validate.xml_xpath_string(".//script[contains(text(), 'roomid')]/text()"),
validate.any(None, validate.all(
validate.transform(self._room_id_re.search),
validate.any(None, validate.all(
validate.get(1),
validate.transform(lambda s: "".join(map(lambda c: chr(int(c)), s.split(",")))),
)),
)),
))
if not media_code:
return
log.debug("Media code: {0}".format(media_code))
json = self.session.http.post(
"https://api.pandalive.co.kr/v1/live/play",
data={"action": "watch", "mediaCode": media_code},
schema=validate.Schema(
validate.parse_json(), {
validate.optional("media"): {
"title": validate.text,
"userId": validate.text,
"userNick": validate.text,
"isPw": bool,
"isLive": bool,
"liveType": validate.text,
},
validate.optional("PlayList"): {
"hls2": [{
"url": validate.url(),
}],
},
"result": bool,
"message": validate.text,
},
)
)
if not json["result"]:
log.error(json["message"])
return
if not json["media"]["isLive"]:
log.error("The broadcast has ended")
return
if json["media"]["isPw"]:
log.error("The broadcast is password protected")
return
log.info("Broadcast type: {0}".format(json['media']['liveType']))
self.author = "{0} ({1})".format(json['media']['userNick'], json['media']['userId'])
self.title = "{0}".format(json['media']['title'])
return HLSStream.parse_variant_playlist(self.session, json["PlayList"]["hls2"][0]["url"])
__plugin__ = Pandalive
|
py | 1a48ff36a8ddb30588f2fe572b7545d735f8a116 | # coding=utf-8
"""
Ingest data from the command-line.
python srtm_prepare.py --output Elevation_1secSRTM_DEMs_v1.0_DEM_Mosaic_dem1sv1_0.yaml \
/g/data/rr1/Elevation/NetCDF/1secSRTM_DEMs_v1.0/DEM/Elevation_1secSRTM_DEMs_v1.0_DEM_Mosaic_dem1sv1_0.nc
"""
from __future__ import absolute_import
import uuid
from dateutil.parser import parse
import yaml
import click
import netCDF4
import os
def prepare_layers(images):
layerdict = {}
for i in images:
image = netCDF4.Dataset(i)
layerpath = str(image.filepath())
for targetlayer in image.variables.values():
if targetlayer.name not in ['crs', 'lat', 'lon']:
layername = str(targetlayer.name)
layerdict[layername] = {'path': layerpath, 'layer': layername, }
return layerdict
def prepare_dataset(image, datasets):
image = netCDF4.Dataset(image)
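    # GDAL-style GeoTransform: (origin_x, pixel_width, row_rotation,
    # origin_y, column_rotation, pixel_height), with pixel_height negative
    # for north-up rasters.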
    projection = list(image.variables.values())[0].spatial_ref
    geotransform = str(list(image.variables.values())[0].GeoTransform).split()
    fgeotransform = [float(i) for i in geotransform]
    lon_pixels = int(list(image.dimensions.values())[0].size)
    lat_pixels = int(list(image.dimensions.values())[1].size)
left, right = float(fgeotransform[0]), float(fgeotransform[0] + (lon_pixels * fgeotransform[1]))
bottom, top = float(fgeotransform[3] + (lat_pixels * fgeotransform[5])), float(fgeotransform[3])
return {
'id': str(uuid.uuid4()),
'processing_level': 'modelled',
'product_type': 'DEM',
'creation_dt': parse(image.history[0:24]).isoformat(),
'platform': {'code': 'Space Shuttle Endeavour'},
'instrument': {'name': 'SIR'},
'extent': {
'coord': {
'ul': {'lon': left, 'lat': top},
'ur': {'lon': right, 'lat': top},
'll': {'lon': left, 'lat': bottom},
'lr': {'lon': right, 'lat': bottom},
},
'from_dt': parse(image.history[0:24]).isoformat(),
'to_dt': parse(image.history[0:24]).isoformat(),
'center_dt': parse(image.history[0:24]).isoformat(),
},
'format': {'name': 'NETCDF'},
'grid_spatial': {
'projection': {
'spatial_reference': projection,
'geo_ref_points': {
'ul': {'x': left, 'y': top},
'ur': {'x': right, 'y': top},
'll': {'x': left, 'y': bottom},
'lr': {'x': right, 'y': bottom},
}
}
},
'image': {
'bands': prepare_layers(datasets)
},
'lineage': {'source_datasets': {}},
}
@click.command(help="Prepare single layer netcdf with common grid spec for ingestion to Data Cube.")
@click.argument('datasets', type=click.Path(exists=True, readable=True), nargs=-1)
@click.option('--output', help="Write datasets into this file", type=click.Path(exists=False, writable=True))
def main(datasets, output):
with open(output, 'w') as stream:
yaml.dump((prepare_dataset(datasets[0], datasets)), stream)
if __name__ == "__main__":
main()
|
py | 1a48ff634e0034905adf0826c0e029e6e89bc999 | '''
By Benjamin
'''
class Dealer:
'''
INPUT: None
This is the dealer's class.
'''
def __init__(self):
        self.cards = []  # Placeholder for the dealer's cards
        self.hidden_card = 0  # Placeholder for the hidden card
# ----------------------------
def hide_first_card(self):
'''
INPUT: None
        Hides the first card given to the dealer.
'''
try:
self.hidden_card = self.cards[0]
except IndexError:
            print('The dealer does not have any card currently.')
            return
        self.cards[0] = 'X'
# --------------------------------
def reveal_hidden_card(self):
'''
INPUT: None
        Reveals the dealer's hidden card if one exists.
'''
# Checking if a card has been hidden previously
        if self.hidden_card != 0:
self.cards[0] = self.hidden_card
else:
print('The dealer has no hidden card currently.')
|
py | 1a48fff077b4c1ecada1e12297a8aebf54ee9d6d | # Copyright 2018 Xiaomi, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon_cli
import filelock
import glob
import logging
import numpy as np
import os
import re
import sh
import struct
import subprocess
import sys
import time
import urllib
from enum import Enum
import common
sys.path.insert(0, "mace/python/tools")
try:
from encrypt_opencl_codegen import encrypt_opencl_codegen
from binary_codegen import tuning_param_codegen
from generate_data import generate_input_data
from validate import validate
from mace_engine_factory_codegen import gen_mace_engine_factory
except Exception as e:
print("Import error:\n%s" % e)
exit(1)
################################
# common
################################
def strip_invalid_utf8(str):
return sh.iconv(str, "-c", "-t", "UTF-8")
def split_stdout(stdout_str):
stdout_str = strip_invalid_utf8(stdout_str)
# Filter out last empty line
return [l.strip() for l in stdout_str.split('\n') if len(l.strip()) > 0]
def make_output_processor(buff):
def process_output(line):
print(line.rstrip())
buff.append(line)
return process_output
def device_lock_path(serialno):
return "/tmp/device-lock-%s" % serialno
def device_lock(serialno, timeout=3600):
return filelock.FileLock(device_lock_path(serialno), timeout=timeout)
def is_device_locked(serialno):
try:
with device_lock(serialno, timeout=0.000001):
return False
except filelock.Timeout:
return True
class BuildType(object):
proto = 'proto'
code = 'code'
def stdout_success(stdout):
stdout_lines = stdout.split("\n")
for line in stdout_lines:
if "Aborted" in line or "FAILED" in line or \
"Segmentation fault" in line:
return False
return True
################################
# clear data
################################
def clear_phone_data_dir(serialno, phone_data_dir):
sh.adb("-s",
serialno,
"shell",
"rm -rf %s" % phone_data_dir)
def clear_model_codegen(model_codegen_dir="mace/codegen/models"):
if os.path.exists(model_codegen_dir):
sh.rm("-rf", model_codegen_dir)
################################
# adb commands
################################
def adb_devices():
serialnos = []
p = re.compile(r'(\w+)\s+device')
for line in split_stdout(sh.adb("devices")):
m = p.match(line)
if m:
serialnos.append(m.group(1))
return serialnos
def get_soc_serialnos_map():
serialnos = adb_devices()
soc_serialnos_map = {}
for serialno in serialnos:
props = adb_getprop_by_serialno(serialno)
soc_serialnos_map.setdefault(props["ro.board.platform"], [])\
.append(serialno)
return soc_serialnos_map
def get_target_socs_serialnos(target_socs=None):
soc_serialnos_map = get_soc_serialnos_map()
serialnos = []
if target_socs is None:
target_socs = soc_serialnos_map.keys()
for target_soc in target_socs:
serialnos.extend(soc_serialnos_map[target_soc])
return serialnos
def get_soc_serial_number_map():
serial_numbers = adb_devices()
soc_serial_number_map = {}
for num in serial_numbers:
props = adb_getprop_by_serialno(num)
soc_serial_number_map[props["ro.board.platform"]] = num
return soc_serial_number_map
def get_target_soc_serial_number(target_soc):
soc_serial_number_map = get_soc_serial_number_map()
serial_number = None
if target_soc in soc_serial_number_map:
serial_number = soc_serial_number_map[target_soc]
return serial_number
def adb_getprop_by_serialno(serialno):
outputs = sh.adb("-s", serialno, "shell", "getprop")
raw_props = split_stdout(outputs)
props = {}
p = re.compile(r'\[(.+)\]: \[(.+)\]')
for raw_prop in raw_props:
m = p.match(raw_prop)
if m:
props[m.group(1)] = m.group(2)
return props
def adb_get_device_name_by_serialno(serialno):
props = adb_getprop_by_serialno(serialno)
return props.get("ro.product.model", "").replace(' ', '')
def adb_supported_abis(serialno):
props = adb_getprop_by_serialno(serialno)
abilist_str = props["ro.product.cpu.abilist"]
abis = [abi.strip() for abi in abilist_str.split(',')]
return abis
def adb_get_all_socs():
socs = []
for d in adb_devices():
props = adb_getprop_by_serialno(d)
socs.append(props["ro.board.platform"])
return set(socs)
def adb_push(src_path, dst_path, serialno):
print("Push %s to %s" % (src_path, dst_path))
sh.adb("-s", serialno, "push", src_path, dst_path)
def adb_pull(src_path, dst_path, serialno):
print("Pull %s to %s" % (src_path, dst_path))
try:
sh.adb("-s", serialno, "pull", src_path, dst_path)
except Exception as e:
print("Error msg: %s" % e.stderr)
def adb_run(abi,
serialno,
host_bin_path,
bin_name,
args="",
opencl_profiling=True,
vlog_level=0,
device_bin_path="/data/local/tmp/mace",
out_of_range_check=True,
address_sanitizer=False):
host_bin_full_path = "%s/%s" % (host_bin_path, bin_name)
device_bin_full_path = "%s/%s" % (device_bin_path, bin_name)
props = adb_getprop_by_serialno(serialno)
print(
"====================================================================="
)
print("Trying to lock device %s" % serialno)
with device_lock(serialno):
print("Run on device: %s, %s, %s" %
(serialno, props["ro.board.platform"],
props["ro.product.model"]))
sh.adb("-s", serialno, "shell", "rm -rf %s" % device_bin_path)
sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_bin_path)
adb_push(host_bin_full_path, device_bin_full_path, serialno)
ld_preload = ""
if address_sanitizer:
adb_push(find_asan_rt_library(abi), device_bin_path, serialno)
ld_preload = "LD_PRELOAD=%s/%s" % (device_bin_path,
asan_rt_library_names(abi)),
opencl_profiling = 1 if opencl_profiling else 0
out_of_range_check = 1 if out_of_range_check else 0
print("Run %s" % device_bin_full_path)
stdout_buff = []
process_output = make_output_processor(stdout_buff)
sh.adb(
"-s",
serialno,
"shell",
ld_preload,
"MACE_OUT_OF_RANGE_CHECK=%d" % out_of_range_check,
"MACE_OPENCL_PROFILING=%d" % opencl_profiling,
"MACE_CPP_MIN_VLOG_LEVEL=%d" % vlog_level,
device_bin_full_path,
args,
_tty_in=True,
_out=process_output,
_err_to_out=True)
return "".join(stdout_buff)
################################
# Toolchain
################################
def asan_rt_library_names(abi):
asan_rt_names = {
"armeabi-v7a": "libclang_rt.asan-arm-android.so",
"arm64-v8a": "libclang_rt.asan-aarch64-android.so",
}
return asan_rt_names[abi]
def find_asan_rt_library(abi, asan_rt_path=''):
if not asan_rt_path:
find_path = os.environ['ANDROID_NDK_HOME']
candidates = split_stdout(sh.find(find_path, "-name",
asan_rt_library_names(abi)))
if len(candidates) == 0:
common.MaceLogger.error(
"Toolchain",
"Can't find AddressSanitizer runtime library in % s" %
find_path)
elif len(candidates) > 1:
common.MaceLogger.info(
"More than one AddressSanitizer runtime library, use the 1st")
return candidates[0]
return "%s/%s" % (asan_rt_path, asan_rt_library_names(abi))
################################
# bazel commands
################################
def bazel_build(target,
abi="armeabi-v7a",
hexagon_mode=False,
enable_openmp=True,
enable_neon=True,
address_sanitizer=False,
extra_args=""):
print("* Build %s with ABI %s" % (target, abi))
if abi == "host":
bazel_args = (
"build",
"--define",
"openmp=%s" % str(enable_openmp).lower(),
target,
)
else:
bazel_args = (
"build",
target,
"--config",
"android",
"--cpu=%s" % abi,
"--define",
"neon=%s" % str(enable_neon).lower(),
"--define",
"openmp=%s" % str(enable_openmp).lower(),
"--define",
"hexagon=%s" % str(hexagon_mode).lower())
if address_sanitizer:
bazel_args += ("--config", "asan")
else:
bazel_args += ("--config", "optimization")
if extra_args:
bazel_args += (extra_args, )
sh.bazel(
_fg=True,
*bazel_args)
print("Build done!\n")
def bazel_build_common(target, build_args=""):
stdout_buff = []
process_output = make_output_processor(stdout_buff)
sh.bazel(
"build",
target + build_args,
_tty_in=True,
_out=process_output,
_err_to_out=True)
return "".join(stdout_buff)
def bazel_target_to_bin(target):
# change //mace/a/b:c to bazel-bin/mace/a/b/c
prefix, bin_name = target.split(':')
prefix = prefix.replace('//', '/')
if prefix.startswith('/'):
prefix = prefix[1:]
host_bin_path = "bazel-bin/%s" % prefix
return host_bin_path, bin_name
################################
# mace commands
################################
def gen_encrypted_opencl_source(codegen_path="mace/codegen"):
sh.mkdir("-p", "%s/opencl" % codegen_path)
encrypt_opencl_codegen("./mace/kernels/opencl/cl/",
"mace/codegen/opencl/opencl_encrypt_program.cc")
def gen_mace_engine_factory_source(model_tags,
model_load_type,
embed_model_data,
codegen_path="mace/codegen"):
print("* Generate mace engine creator source")
codegen_tools_dir = "%s/engine" % codegen_path
sh.rm("-rf", codegen_tools_dir)
sh.mkdir("-p", codegen_tools_dir)
gen_mace_engine_factory(
model_tags,
"mace/python/tools",
model_load_type,
embed_model_data,
codegen_tools_dir)
print("Generate mace engine creator source done!\n")
def pull_binaries(abi, serialno, model_output_dirs,
cl_built_kernel_file_name):
compiled_opencl_dir = "/data/local/tmp/mace_run/interior/"
mace_run_param_file = "mace_run.config"
cl_bin_dirs = []
for d in model_output_dirs:
cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
cl_bin_dirs_str = ",".join(cl_bin_dirs)
if cl_bin_dirs:
cl_bin_dir = cl_bin_dirs_str
if os.path.exists(cl_bin_dir):
sh.rm("-rf", cl_bin_dir)
sh.mkdir("-p", cl_bin_dir)
if abi != "host":
adb_pull(compiled_opencl_dir + cl_built_kernel_file_name,
cl_bin_dir, serialno)
adb_pull("/data/local/tmp/mace_run/%s" % mace_run_param_file,
cl_bin_dir, serialno)
def merge_opencl_binaries(binaries_dirs,
cl_compiled_program_file_name,
output_file_path):
platform_info_key = 'mace_opencl_precompiled_platform_info_key'
cl_bin_dirs = []
for d in binaries_dirs:
cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
# create opencl binary output dir
opencl_binary_dir = os.path.dirname(output_file_path)
if not os.path.exists(opencl_binary_dir):
sh.mkdir("-p", opencl_binary_dir)
kvs = {}
for binary_dir in cl_bin_dirs:
binary_path = os.path.join(binary_dir, cl_compiled_program_file_name)
if not os.path.exists(binary_path):
continue
        print("generate opencl code from %s" % binary_path)
with open(binary_path, "rb") as f:
binary_array = np.fromfile(f, dtype=np.uint8)
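        # Serialized layout read below: a uint64 entry count, then for each
        # entry an int32 key length, the raw key bytes, an int32 value length
        # and the raw value bytes.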
idx = 0
size, = struct.unpack("Q", binary_array[idx:idx + 8])
idx += 8
for _ in xrange(size):
key_size, = struct.unpack("i", binary_array[idx:idx + 4])
idx += 4
key, = struct.unpack(
str(key_size) + "s", binary_array[idx:idx + key_size])
idx += key_size
value_size, = struct.unpack("i", binary_array[idx:idx + 4])
idx += 4
if key == platform_info_key and key in kvs:
common.mace_check(
(kvs[key] == binary_array[idx:idx + value_size]).all(),
"",
"There exists more than one OpenCL version for models:"
" %s vs %s " %
(kvs[key], binary_array[idx:idx + value_size]))
else:
kvs[key] = binary_array[idx:idx + value_size]
idx += value_size
output_byte_array = bytearray()
data_size = len(kvs)
output_byte_array.extend(struct.pack("Q", data_size))
for key, value in kvs.iteritems():
key_size = len(key)
output_byte_array.extend(struct.pack("i", key_size))
output_byte_array.extend(struct.pack(str(key_size) + "s", key))
value_size = len(value)
output_byte_array.extend(struct.pack("i", value_size))
output_byte_array.extend(value)
np.array(output_byte_array).tofile(output_file_path)
def gen_tuning_param_code(model_output_dirs,
codegen_path="mace/codegen"):
mace_run_param_file = "mace_run.config"
cl_bin_dirs = []
for d in model_output_dirs:
cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
cl_bin_dirs_str = ",".join(cl_bin_dirs)
tuning_codegen_dir = "%s/tuning/" % codegen_path
if not os.path.exists(tuning_codegen_dir):
sh.mkdir("-p", tuning_codegen_dir)
tuning_param_variable_name = "kTuningParamsData"
tuning_param_codegen(cl_bin_dirs_str,
mace_run_param_file,
"%s/tuning_params.cc" % tuning_codegen_dir,
tuning_param_variable_name)
def gen_mace_version(codegen_path="mace/codegen"):
sh.mkdir("-p", "%s/version" % codegen_path)
sh.bash("mace/tools/git/gen_version_source.sh",
"%s/version/version.cc" % codegen_path)
def gen_model_code(model_codegen_dir,
platform,
model_file_path,
weight_file_path,
model_sha256_checksum,
weight_sha256_checksum,
input_nodes,
output_nodes,
runtime,
model_tag,
input_shapes,
dsp_mode,
embed_model_data,
winograd,
obfuscate,
model_build_type,
data_type,
graph_optimize_options):
bazel_build_common("//mace/python/tools:converter")
if os.path.exists(model_codegen_dir):
sh.rm("-rf", model_codegen_dir)
sh.mkdir("-p", model_codegen_dir)
sh.python("bazel-bin/mace/python/tools/converter",
"-u",
"--platform=%s" % platform,
"--model_file=%s" % model_file_path,
"--weight_file=%s" % weight_file_path,
"--model_checksum=%s" % model_sha256_checksum,
"--weight_checksum=%s" % weight_sha256_checksum,
"--input_node=%s" % input_nodes,
"--output_node=%s" % output_nodes,
"--runtime=%s" % runtime,
"--template=%s" % "mace/python/tools",
"--model_tag=%s" % model_tag,
"--input_shape=%s" % input_shapes,
"--dsp_mode=%s" % dsp_mode,
"--embed_model_data=%s" % embed_model_data,
"--winograd=%s" % winograd,
"--obfuscate=%s" % obfuscate,
"--output_dir=%s" % model_codegen_dir,
"--model_build_type=%s" % model_build_type,
"--data_type=%s" % data_type,
"--graph_optimize_options=%s" % graph_optimize_options,
_fg=True)
def gen_random_input(model_output_dir,
input_nodes,
input_shapes,
input_files,
input_ranges,
input_file_name="model_input"):
for input_name in input_nodes:
formatted_name = common.formatted_file_name(
input_file_name, input_name)
if os.path.exists("%s/%s" % (model_output_dir, formatted_name)):
sh.rm("%s/%s" % (model_output_dir, formatted_name))
input_nodes_str = ",".join(input_nodes)
input_shapes_str = ":".join(input_shapes)
input_ranges_str = ":".join(input_ranges)
generate_input_data("%s/%s" % (model_output_dir, input_file_name),
input_nodes_str,
input_shapes_str,
input_ranges_str)
input_file_list = []
if isinstance(input_files, list):
input_file_list.extend(input_files)
else:
input_file_list.append(input_files)
if len(input_file_list) != 0:
input_name_list = []
if isinstance(input_nodes, list):
input_name_list.extend(input_nodes)
else:
input_name_list.append(input_nodes)
if len(input_file_list) != len(input_name_list):
            raise Exception('If input_files is set, the number of input files '
                            'must match the number of input names.')
for i in range(len(input_file_list)):
if input_file_list[i] is not None:
dst_input_file = model_output_dir + '/' + \
common.formatted_file_name(input_file_name,
input_name_list[i])
if input_file_list[i].startswith("http://") or \
input_file_list[i].startswith("https://"):
urllib.urlretrieve(input_file_list[i], dst_input_file)
else:
sh.cp("-f", input_file_list[i], dst_input_file)
def update_mace_run_lib(build_tmp_binary_dir, linkshared=0):
if linkshared == 0:
mace_run_filepath = build_tmp_binary_dir + "/mace_run_static"
else:
mace_run_filepath = build_tmp_binary_dir + "/mace_run_shared"
if os.path.exists(mace_run_filepath):
sh.rm("-rf", mace_run_filepath)
if linkshared == 0:
sh.cp("-f", "bazel-bin/mace/tools/validation/mace_run_static",
build_tmp_binary_dir)
else:
sh.cp("-f", "bazel-bin/mace/tools/validation/mace_run_shared",
build_tmp_binary_dir)
def touch_tuned_file_flag(build_tmp_binary_dir):
sh.touch(build_tmp_binary_dir + '/tuned')
def is_binary_tuned(build_tmp_binary_dir):
return os.path.exists(build_tmp_binary_dir + '/tuned')
def create_internal_storage_dir(serialno, phone_data_dir):
internal_storage_dir = "%s/interior/" % phone_data_dir
sh.adb("-s", serialno, "shell", "mkdir", "-p", internal_storage_dir)
return internal_storage_dir
def update_libmace_shared_library(serial_num,
abi,
project_name,
build_output_dir,
library_output_dir):
library_dir = "%s/%s/%s/%s" % (
build_output_dir, project_name, library_output_dir, abi)
if os.path.exists(library_dir):
sh.rm("-rf", library_dir)
sh.mkdir("-p", library_dir)
sh.cp("-f", "bazel-bin/mace/libmace.so", library_dir)
sh.cp("-f",
"%s/sources/cxx-stl/gnu-libstdc++/4.9/libs/%s/libgnustl_shared.so" %
(os.environ["ANDROID_NDK_HOME"], abi),
library_dir)
if os.path.exists("mace/libmace.so"):
sh.rm("-f", "mace/libmace.so")
sh.cp("-f", "bazel-bin/mace/libmace.so", "mace/")
def tuning_run(abi,
serialno,
target_dir,
target_name,
vlog_level,
embed_model_data,
model_output_dir,
input_nodes,
output_nodes,
input_shapes,
output_shapes,
mace_model_dir,
model_tag,
device_type,
running_round,
restart_round,
limit_opencl_kernel_time,
tuning,
out_of_range_check,
phone_data_dir,
build_type,
opencl_binary_file,
shared_library_dir,
omp_num_threads=-1,
cpu_affinity_policy=1,
gpu_perf_hint=3,
gpu_priority_hint=3,
input_file_name="model_input",
output_file_name="model_out",
runtime_failure_ratio=0.0,
address_sanitizer=False,
linkshared=0):
print("* Run '%s' with round=%s, restart_round=%s, tuning=%s, "
"out_of_range_check=%s, omp_num_threads=%s, cpu_affinity_policy=%s, "
"gpu_perf_hint=%s, gpu_priority_hint=%s" %
(model_tag, running_round, restart_round, str(tuning),
str(out_of_range_check), omp_num_threads, cpu_affinity_policy,
gpu_perf_hint, gpu_priority_hint))
mace_model_path = ""
if build_type == BuildType.proto:
mace_model_path = "%s/%s.pb" % (mace_model_dir, model_tag)
if abi == "host":
p = subprocess.Popen(
[
"env",
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"MACE_RUNTIME_FAILURE_RATIO=%f" % runtime_failure_ratio,
"%s/%s" % (target_dir, target_name),
"--model_name=%s" % model_tag,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (model_output_dir, input_file_name),
"--output_file=%s/%s" % (model_output_dir, output_file_name),
"--model_data_file=%s/%s.data" % (mace_model_dir, model_tag),
"--device=%s" % device_type,
"--round=%s" % running_round,
"--restart_round=%s" % restart_round,
"--omp_num_threads=%s" % omp_num_threads,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate()
stdout = err + out
        print(stdout)
print("Running finished!\n")
return stdout
else:
sh.adb("-s", serialno, "shell", "mkdir", "-p", phone_data_dir)
internal_storage_dir = create_internal_storage_dir(
serialno, phone_data_dir)
for input_name in input_nodes:
formatted_name = common.formatted_file_name(input_file_name,
input_name)
adb_push("%s/%s" % (model_output_dir, formatted_name),
phone_data_dir, serialno)
if address_sanitizer:
adb_push(find_asan_rt_library(abi), phone_data_dir, serialno)
if not embed_model_data:
adb_push("%s/%s.data" % (mace_model_dir, model_tag),
phone_data_dir, serialno)
if device_type == common.DeviceType.GPU\
and os.path.exists(opencl_binary_file):
adb_push(opencl_binary_file, phone_data_dir, serialno)
adb_push("third_party/nnlib/libhexagon_controller.so",
phone_data_dir, serialno)
mace_model_phone_path = ""
if build_type == BuildType.proto:
mace_model_phone_path = "%s/%s.pb" % (phone_data_dir, model_tag)
adb_push(mace_model_path,
mace_model_phone_path,
serialno)
if linkshared == 1:
adb_push("%s/libmace.so" % shared_library_dir, phone_data_dir,
serialno)
adb_push("%s/libgnustl_shared.so" % shared_library_dir,
phone_data_dir,
serialno)
adb_push("%s/%s" % (target_dir, target_name), phone_data_dir,
serialno)
stdout_buff = []
process_output = make_output_processor(stdout_buff)
adb_cmd = [
"LD_LIBRARY_PATH=%s" % phone_data_dir,
"MACE_TUNING=%s" % int(tuning),
"MACE_OUT_OF_RANGE_CHECK=%s" % int(out_of_range_check),
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"MACE_RUN_PARAMETER_PATH=%s/mace_run.config" % phone_data_dir,
"MACE_INTERNAL_STORAGE_PATH=%s" % internal_storage_dir,
"MACE_LIMIT_OPENCL_KERNEL_TIME=%s" % limit_opencl_kernel_time,
"MACE_RUNTIME_FAILURE_RATIO=%f" % runtime_failure_ratio,
]
if address_sanitizer:
adb_cmd.extend([
"LD_PRELOAD=%s/%s" % (phone_data_dir,
asan_rt_library_names(abi))
])
adb_cmd.extend([
"%s/%s" % (phone_data_dir, target_name),
"--model_name=%s" % model_tag,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (phone_data_dir, input_file_name),
"--output_file=%s/%s" % (phone_data_dir, output_file_name),
"--model_data_file=%s/%s.data" % (phone_data_dir, model_tag),
"--device=%s" % device_type,
"--round=%s" % running_round,
"--restart_round=%s" % restart_round,
"--omp_num_threads=%s" % omp_num_threads,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_phone_path,
"--opencl_binary_file=%s/%s" %
(phone_data_dir, os.path.basename(opencl_binary_file)),
])
adb_cmd = ' '.join(adb_cmd)
cmd_file_name = "%s-%s-%s" % ('cmd_file', model_tag, str(time.time()))
adb_cmd_file = "%s/%s" % (phone_data_dir, cmd_file_name)
tmp_cmd_file = "%s/%s" % ('/tmp', cmd_file_name)
with open(tmp_cmd_file, 'w') as cmd_file:
cmd_file.write(adb_cmd)
adb_push(tmp_cmd_file, adb_cmd_file, serialno)
os.remove(tmp_cmd_file)
sh.adb(
"-s",
serialno,
"shell",
"sh",
adb_cmd_file,
_tty_in=True,
_out=process_output,
_err_to_out=True)
stdout = "".join(stdout_buff)
if not stdout_success(stdout):
common.MaceLogger.error("Mace Run", "Mace run failed.")
sh.adb(
"-s",
serialno,
"shell",
"rm",
adb_cmd_file,
_fg=True)
print("Running finished!\n")
return stdout
def validate_model(abi,
serialno,
model_file_path,
weight_file_path,
platform,
device_type,
input_nodes,
output_nodes,
input_shapes,
output_shapes,
model_output_dir,
phone_data_dir,
caffe_env,
input_file_name="model_input",
output_file_name="model_out"):
print("* Validate with %s" % platform)
if abi != "host":
for output_name in output_nodes:
formatted_name = common.formatted_file_name(
output_file_name, output_name)
if os.path.exists("%s/%s" % (model_output_dir,
formatted_name)):
sh.rm("-rf", "%s/%s" % (model_output_dir, formatted_name))
adb_pull("%s/%s" % (phone_data_dir, formatted_name),
model_output_dir, serialno)
if platform == "tensorflow":
validate(platform, model_file_path, "",
"%s/%s" % (model_output_dir, input_file_name),
"%s/%s" % (model_output_dir, output_file_name), device_type,
":".join(input_shapes), ":".join(output_shapes),
",".join(input_nodes), ",".join(output_nodes))
elif platform == "caffe":
image_name = "mace-caffe:latest"
container_name = "mace_caffe_validator"
if caffe_env == common.CaffeEnvType.LOCAL:
import imp
try:
imp.find_module('caffe')
except ImportError:
logger.error('There is no caffe python module.')
validate(platform, model_file_path, weight_file_path,
"%s/%s" % (model_output_dir, input_file_name),
"%s/%s" % (model_output_dir, output_file_name),
device_type,
":".join(input_shapes), ":".join(output_shapes),
",".join(input_nodes), ",".join(output_nodes))
elif caffe_env == common.CaffeEnvType.DOCKER:
docker_image_id = sh.docker("images", "-q", image_name)
if not docker_image_id:
print("Build caffe docker")
sh.docker("build", "-t", image_name,
"third_party/caffe")
container_id = sh.docker("ps", "-qa", "-f",
"name=%s" % container_name)
if container_id and not sh.docker("ps", "-qa", "--filter",
"status=running", "-f",
"name=%s" % container_name):
sh.docker("rm", "-f", container_name)
container_id = ""
if not container_id:
print("Run caffe container")
sh.docker(
"run",
"-d",
"-it",
"--name",
container_name,
image_name,
"/bin/bash")
for input_name in input_nodes:
formatted_input_name = common.formatted_file_name(
input_file_name, input_name)
sh.docker(
"cp",
"%s/%s" % (model_output_dir, formatted_input_name),
"%s:/mace" % container_name)
for output_name in output_nodes:
formatted_output_name = common.formatted_file_name(
output_file_name, output_name)
sh.docker(
"cp",
"%s/%s" % (model_output_dir, formatted_output_name),
"%s:/mace" % container_name)
model_file_name = os.path.basename(model_file_path)
weight_file_name = os.path.basename(weight_file_path)
sh.docker("cp", "tools/common.py", "%s:/mace" % container_name)
sh.docker("cp", "tools/validate.py", "%s:/mace" % container_name)
sh.docker("cp", model_file_path, "%s:/mace" % container_name)
sh.docker("cp", weight_file_path, "%s:/mace" % container_name)
sh.docker(
"exec",
container_name,
"python",
"-u",
"/mace/validate.py",
"--platform=caffe",
"--model_file=/mace/%s" % model_file_name,
"--weight_file=/mace/%s" % weight_file_name,
"--input_file=/mace/%s" % input_file_name,
"--mace_out_file=/mace/%s" % output_file_name,
"--device_type=%s" % device_type,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
_fg=True)
print("Validation done!\n")
def build_host_libraries(model_build_type, abi):
bazel_build("@com_google_protobuf//:protobuf_lite", abi=abi)
bazel_build("//mace/proto:mace_cc", abi=abi)
bazel_build("//mace/codegen:generated_opencl", abi=abi)
bazel_build("//mace/codegen:generated_tuning_params", abi=abi)
bazel_build("//mace/codegen:generated_version", abi=abi)
bazel_build("//mace/utils:utils", abi=abi)
bazel_build("//mace/core:core", abi=abi)
bazel_build("//mace/kernels:kernels", abi=abi)
bazel_build("//mace/ops:ops", abi=abi)
if model_build_type == BuildType.code:
bazel_build(
"//mace/codegen:generated_models",
abi=abi)
################################
# library
################################
def get_lib_path(target_soc, serial_num, abi, project_name, build_output_dir,
library_output_dir):
project_output_dir = "%s/%s" % (build_output_dir, project_name)
library_dir = "%s/%s" % (project_output_dir, library_output_dir)
model_bin_dir = "%s/%s/" % (library_dir, abi)
if abi == "host":
lib_path = "%s/libmace_%s.a" % \
(model_bin_dir, project_name)
else:
if not target_soc:
lib_path = "%s/libmace_%s.a" % \
(model_bin_dir, project_name)
else:
device_name = adb_get_device_name_by_serialno(serial_num)
lib_path = "%s/libmace_%s.%s.%s.a" % \
(model_bin_dir, project_name,
device_name, target_soc)
return lib_path
def merge_libs(target_soc,
serial_num,
abi,
project_name,
build_output_dir,
library_output_dir,
model_build_type,
hexagon_mode):
print("* Merge mace lib")
project_output_dir = "%s/%s" % (build_output_dir, project_name)
hexagon_lib_file = "third_party/nnlib/libhexagon_controller.so"
library_dir = "%s/%s" % (project_output_dir, library_output_dir)
model_bin_dir = "%s/%s/" % (library_dir, abi)
if not os.path.exists(model_bin_dir):
sh.mkdir("-p", model_bin_dir)
if hexagon_mode:
sh.cp("-f", hexagon_lib_file, library_dir)
lib_path = get_lib_path(target_soc, serial_num, abi,
project_name, build_output_dir, library_output_dir)
# make static library
mri_stream = ""
if abi == "host":
mri_stream += "create %s\n" % lib_path
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_opencl.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_tuning_params.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_version.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/core/libcore.pic.lo\n")
mri_stream += (
"addlib "
"bazel-bin/mace/kernels/libkernels.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/utils/libutils.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/proto/libmace_cc.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/external/com_google_protobuf/libprotobuf_lite.pic.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/ops/libops.pic.lo\n")
if model_build_type == BuildType.code:
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_models.pic.a\n")
else:
mri_stream += "create %s\n" % lib_path
if model_build_type == BuildType.code:
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_models.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_opencl.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_tuning_params.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/codegen/libgenerated_version.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/core/libcore.lo\n")
mri_stream += (
"addlib "
"bazel-bin/mace/kernels/libkernels.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/utils/libutils.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/proto/libmace_cc.a\n")
mri_stream += (
"addlib "
"bazel-bin/external/com_google_protobuf/libprotobuf_lite.a\n")
mri_stream += (
"addlib "
"bazel-bin/mace/ops/libops.lo\n")
mri_stream += "save\n"
mri_stream += "end\n"
cmd = sh.Command("%s/toolchains/" % os.environ["ANDROID_NDK_HOME"] +
"aarch64-linux-android-4.9/prebuilt/linux-x86_64/" +
"bin/aarch64-linux-android-ar")
cmd("-M", _in=mri_stream)
print("Libs merged!\n")
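# Note on the `ar -M` invocation above (explanatory, not generated output): GNU
# ar's MRI-script mode reads commands from stdin, so the stream built by
# merge_libs() amounts to a script of the form
#   create <merged .a path>
#   addlib <constituent archive>   (one line per library)
#   save
#   end
# which folds every listed archive into the single merged static library.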
def packaging_lib(libmace_output_dir, project_name):
print("* Package libs for %s" % project_name)
tar_package_name = "libmace_%s.tar.gz" % project_name
project_dir = "%s/%s" % (libmace_output_dir, project_name)
tar_package_path = "%s/%s" % (project_dir, tar_package_name)
if os.path.exists(tar_package_path):
sh.rm("-rf", tar_package_path)
print("Start packaging '%s' libs into %s" % (project_name,
tar_package_path))
sh.tar(
"cvzf",
"%s" % tar_package_path,
glob.glob("%s/*" % project_dir),
"--exclude",
"%s/_tmp" % project_dir,
_fg=True)
print("Packaging Done!\n")
################################
# example
################################
def build_example(target_soc,
serial_num,
abi,
project_name,
build_output_dir,
library_output_dir,
model_output_dir,
build_type,
hexagon_mode,
enable_openmp,
linkshared=False):
static_lib_name = "mace/libmace.a"
if not linkshared:
target_name = "example_static"
lib_path = get_lib_path(target_soc, serial_num, abi, project_name,
build_output_dir, library_output_dir)
sh.cp("-f", lib_path, static_lib_name)
else:
target_name = "example_shared"
example_target = "//mace/examples/cli:%s" % target_name
if build_type == BuildType.code:
build_arg = "--per_file_copt=//mace/examples/cli:example.cc@-DCODE_TYPE" # noqa
else:
build_arg = ""
bazel_build(example_target,
abi=abi,
enable_openmp=enable_openmp,
hexagon_mode=hexagon_mode,
extra_args=build_arg)
example_binary_file = "%s/%s" % (model_output_dir, target_name)
if os.path.exists(example_binary_file):
sh.rm("-rf", example_binary_file)
target_bin = "/".join(bazel_target_to_bin(example_target))
sh.cp("-f", target_bin, model_output_dir)
sh.rm("-rf", static_lib_name)
################################
# benchmark
################################
def build_benchmark_model(abi,
model_output_dir,
hexagon_mode,
enable_openmp,
linkshared=False):
if not linkshared:
target_name = "benchmark_model_static"
else:
target_name = "benchmark_model_shared"
benchmark_target = "//mace/benchmark:%s" % target_name
bazel_build(benchmark_target,
abi=abi,
enable_openmp=enable_openmp,
hexagon_mode=hexagon_mode)
benchmark_binary_file = "%s/%s" % (model_output_dir, target_name)
if os.path.exists(benchmark_binary_file):
sh.rm("-rf", benchmark_binary_file)
target_bin = "/".join(bazel_target_to_bin(benchmark_target))
sh.cp("-f", target_bin, model_output_dir)
def benchmark_model(abi,
serialno,
benchmark_binary_dir,
vlog_level,
embed_model_data,
model_output_dir,
mace_model_dir,
input_nodes,
output_nodes,
input_shapes,
output_shapes,
model_tag,
device_type,
phone_data_dir,
build_type,
opencl_binary_file,
shared_library_dir,
omp_num_threads=-1,
cpu_affinity_policy=1,
gpu_perf_hint=3,
gpu_priority_hint=3,
input_file_name="model_input",
linkshared=0):
print("* Benchmark for %s" % model_tag)
if linkshared == 0:
benchmark_model_target = "benchmark_model_static"
else:
benchmark_model_target = "benchmark_model_shared"
mace_model_path = ""
if build_type == BuildType.proto:
mace_model_path = "%s/%s.pb" % (mace_model_dir, model_tag)
if abi == "host":
p = subprocess.Popen(
[
"env",
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"%s/%s" % (benchmark_binary_dir, benchmark_model_target),
"--model_name=%s" % model_tag,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (model_output_dir, input_file_name),
"--model_data_file=%s/%s.data" % (mace_model_dir, model_tag),
"--device=%s" % device_type,
"--omp_num_threads=%s" % omp_num_threads,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
])
p.wait()
else:
sh.adb("-s", serialno, "shell", "mkdir", "-p", phone_data_dir)
internal_storage_dir = create_internal_storage_dir(
serialno, phone_data_dir)
for input_name in input_nodes:
formatted_name = common.formatted_file_name(input_file_name,
input_name)
adb_push("%s/%s" % (model_output_dir, formatted_name),
phone_data_dir, serialno)
if not embed_model_data:
adb_push("%s/%s.data" % (mace_model_dir, model_tag),
phone_data_dir, serialno)
if device_type == common.DeviceType.GPU \
and os.path.exists(opencl_binary_file):
adb_push(opencl_binary_file, phone_data_dir, serialno)
mace_model_phone_path = ""
if build_type == BuildType.proto:
mace_model_phone_path = "%s/%s.pb" % (phone_data_dir, model_tag)
adb_push(mace_model_path,
mace_model_phone_path,
serialno)
if linkshared == 1:
adb_push("%s/libmace.so" % shared_library_dir, phone_data_dir,
serialno)
adb_push("%s/libgnustl_shared.so" % shared_library_dir,
phone_data_dir,
serialno)
adb_push("%s/%s" % (benchmark_binary_dir, benchmark_model_target),
phone_data_dir,
serialno)
adb_cmd = [
"LD_LIBRARY_PATH=%s" % phone_data_dir,
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"MACE_RUN_PARAMETER_PATH=%s/mace_run.config" %
phone_data_dir,
"MACE_INTERNAL_STORAGE_PATH=%s" % internal_storage_dir,
"MACE_OPENCL_PROFILING=1",
"%s/%s" % (phone_data_dir, benchmark_model_target),
"--model_name=%s" % model_tag,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (phone_data_dir, input_file_name),
"--model_data_file=%s/%s.data" % (phone_data_dir, model_tag),
"--device=%s" % device_type,
"--omp_num_threads=%s" % omp_num_threads,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_phone_path,
"--opencl_binary_file=%s/%s" %
(phone_data_dir, os.path.basename(opencl_binary_file)),
]
adb_cmd = ' '.join(adb_cmd)
cmd_file_name = "%s-%s-%s" % ('cmd_file', model_tag, str(time.time()))
adb_cmd_file = "%s/%s" % (phone_data_dir, cmd_file_name)
tmp_cmd_file = "%s/%s" % ('/tmp', cmd_file_name)
with open(tmp_cmd_file, 'w') as cmd_file:
cmd_file.write(adb_cmd)
adb_push(tmp_cmd_file, adb_cmd_file, serialno)
os.remove(tmp_cmd_file)
sh.adb(
"-s",
serialno,
"shell",
"sh",
adb_cmd_file,
_fg=True)
sh.adb(
"-s",
serialno,
"shell",
"rm",
adb_cmd_file,
_fg=True)
print("Benchmark done!\n")
def build_run_throughput_test(abi,
serialno,
vlog_level,
run_seconds,
merged_lib_file,
model_input_dir,
embed_model_data,
input_nodes,
output_nodes,
input_shapes,
output_shapes,
cpu_model_tag,
gpu_model_tag,
dsp_model_tag,
phone_data_dir,
strip="always",
input_file_name="model_input"):
print("* Build and run throughput_test")
model_tag_build_flag = ""
if cpu_model_tag:
model_tag_build_flag += "--copt=-DMACE_CPU_MODEL_TAG=%s " % \
cpu_model_tag
if gpu_model_tag:
model_tag_build_flag += "--copt=-DMACE_GPU_MODEL_TAG=%s " % \
gpu_model_tag
if dsp_model_tag:
model_tag_build_flag += "--copt=-DMACE_DSP_MODEL_TAG=%s " % \
dsp_model_tag
sh.cp("-f", merged_lib_file, "mace/benchmark/libmace_merged.a")
sh.bazel(
"build",
"-c",
"opt",
"--strip",
strip,
"--verbose_failures",
"//mace/benchmark:model_throughput_test",
"--crosstool_top=//external:android/crosstool",
"--host_crosstool_top=@bazel_tools//tools/cpp:toolchain",
"--cpu=%s" % abi,
"--copt=-std=c++11",
"--copt=-D_GLIBCXX_USE_C99_MATH_TR1",
"--copt=-Werror=return-type",
"--copt=-O3",
"--define",
"neon=true",
"--define",
"openmp=true",
model_tag_build_flag,
_fg=True)
sh.rm("mace/benchmark/libmace_merged.a")
sh.adb("-s",
serialno,
"shell",
"mkdir",
"-p",
phone_data_dir)
adb_push("%s/%s_%s" % (model_input_dir, input_file_name,
",".join(input_nodes)),
phone_data_dir,
serialno)
adb_push("bazel-bin/mace/benchmark/model_throughput_test",
phone_data_dir,
serialno)
if not embed_model_data:
        adb_push("codegen/models/%s/%s.data" % (cpu_model_tag, cpu_model_tag),
                 phone_data_dir,
                 serialno)
        adb_push("codegen/models/%s/%s.data" % (gpu_model_tag, gpu_model_tag),
                 phone_data_dir,
                 serialno)
        adb_push("codegen/models/%s/%s.data" % (dsp_model_tag, dsp_model_tag),
                 phone_data_dir,
                 serialno)
adb_push("third_party/nnlib/libhexagon_controller.so",
phone_data_dir,
serialno)
sh.adb(
"-s",
serialno,
"shell",
"LD_LIBRARY_PATH=%s" % phone_data_dir,
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"MACE_RUN_PARAMETER_PATH=%s/mace_run.config" %
phone_data_dir,
"%s/model_throughput_test" % phone_data_dir,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (phone_data_dir, input_file_name),
"--cpu_model_data_file=%s/%s.data" % (phone_data_dir,
cpu_model_tag),
"--gpu_model_data_file=%s/%s.data" % (phone_data_dir,
gpu_model_tag),
"--dsp_model_data_file=%s/%s.data" % (phone_data_dir,
dsp_model_tag),
"--run_seconds=%s" % run_seconds,
_fg=True)
print("throughput_test done!\n")
################################
# falcon
################################
def falcon_tags(tags_dict):
tags = ""
for k, v in tags_dict.iteritems():
if tags == "":
tags = "%s=%s" % (k, v)
else:
tags = tags + ",%s=%s" % (k, v)
return tags
def falcon_push_metrics(server, metrics, endpoint="mace_dev", tags={}):
cli = falcon_cli.FalconCli.connect(server=server, port=8433, debug=False)
ts = int(time.time())
falcon_metrics = [{
"endpoint": endpoint,
"metric": key,
"tags": falcon_tags(tags),
"timestamp": ts,
"value": value,
"step": 600,
"counterType": "GAUGE"
} for key, value in metrics.iteritems()]
cli.update(falcon_metrics)
|
py | 1a4900cc8a72c03153dff586bcabd087bd9375a1 | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara.tensor as at
import numpy as np
from aesara.tensor.random.basic import (
RandomVariable,
bernoulli,
betabinom,
binomial,
categorical,
geometric,
hypergeometric,
nbinom,
poisson,
)
from scipy import stats
import pymc as pm
from pymc.aesaraf import floatX, intX, take_along_axis
from pymc.distributions.dist_math import (
betaln,
binomln,
check_parameters,
factln,
log_diff_normal_cdf,
logpow,
normal_lccdf,
normal_lcdf,
)
from pymc.distributions.distribution import Discrete
from pymc.distributions.logprob import logcdf, logp
from pymc.distributions.shape_utils import rv_size_is_none
from pymc.math import sigmoid
__all__ = [
"Binomial",
"BetaBinomial",
"Bernoulli",
"DiscreteWeibull",
"Poisson",
"NegativeBinomial",
"Constant",
"ZeroInflatedPoisson",
"ZeroInflatedBinomial",
"ZeroInflatedNegativeBinomial",
"DiscreteUniform",
"Geometric",
"HyperGeometric",
"Categorical",
"OrderedLogistic",
"OrderedProbit",
]
class Binomial(Discrete):
R"""
Binomial log-likelihood.
The discrete probability distribution of the number of successes
in a sequence of n independent yes/no experiments, each of which
yields success with probability p.
The pmf of this distribution is
.. math:: f(x \mid n, p) = \binom{n}{x} p^x (1-p)^{n-x}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 22)
ns = [10, 17]
ps = [0.5, 0.7]
for n, p in zip(ns, ps):
pmf = st.binom.pmf(x, n, p)
plt.plot(x, pmf, '-o', label='n = {}, p = {}'.format(n, p))
plt.xlabel('x', fontsize=14)
plt.ylabel('f(x)', fontsize=14)
plt.legend(loc=1)
plt.show()
======== ==========================================
Support :math:`x \in \{0, 1, \ldots, n\}`
Mean :math:`n p`
Variance :math:`n p (1 - p)`
======== ==========================================
Parameters
----------
n: int
Number of Bernoulli trials (n >= 0).
p: float
Probability of success in each trial (0 < p < 1).
"""
rv_op = binomial
@classmethod
def dist(cls, n, p, *args, **kwargs):
n = at.as_tensor_variable(intX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([n, p], **kwargs)
def get_moment(rv, size, n, p):
mean = at.round(n * p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, n, p):
r"""
Calculate log-probability of Binomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, n)),
-np.inf,
binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),
)
return check_parameters(res, 0 < n, 0 <= p, p <= 1, msg="n > 0, 0 <= p <= 1")
def logcdf(value, n, p):
"""
Compute the log of the cumulative distribution function for Binomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
value = at.floor(value)
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, n),
at.log(at.betainc(n - value, value + 1, 1 - p)),
0,
),
)
return check_parameters(
res,
0 < n,
0 <= p,
p <= 1,
msg="n > 0, 0 <= p <= 1",
)
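# A minimal usage sketch (not part of the library source): a model with a
# Binomial likelihood using the (n, p) parametrization documented above. The
# variable names and observed counts are illustrative assumptions.
def _example_binomial_model():
    observed_heads = np.array([4, 6, 5])
    with pm.Model() as model:
        p = pm.Beta("p", alpha=1.0, beta=1.0)
        pm.Binomial("heads", n=10, p=p, observed=observed_heads)
    return model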
class BetaBinomial(Discrete):
R"""
Beta-binomial log-likelihood.
Equivalent to binomial random variable with success probability
drawn from a beta distribution.
The pmf of this distribution is
.. math::
f(x \mid \alpha, \beta, n) =
\binom{n}{x}
\frac{B(x + \alpha, n - x + \beta)}{B(\alpha, \beta)}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def BetaBinom(a, b, n, x):
pmf = special.binom(n, x) * (special.beta(x+a, n-x+b) / special.beta(a, b))
return pmf
x = np.arange(0, 11)
alphas = [0.5, 1, 2.3]
betas = [0.5, 1, 2]
n = 10
for a, b in zip(alphas, betas):
pmf = BetaBinom(a, b, n, x)
plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\beta$ = {}, n = {}'.format(a, b, n))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=9)
plt.show()
======== =================================================================
Support :math:`x \in \{0, 1, \ldots, n\}`
Mean :math:`n \dfrac{\alpha}{\alpha + \beta}`
Variance :math:`n \dfrac{\alpha \beta}{(\alpha+\beta)^2 (\alpha+\beta+1)}`
======== =================================================================
Parameters
----------
n: int
Number of Bernoulli trials (n >= 0).
alpha: float
alpha > 0.
beta: float
beta > 0.
"""
rv_op = betabinom
@classmethod
def dist(cls, alpha, beta, n, *args, **kwargs):
alpha = at.as_tensor_variable(floatX(alpha))
beta = at.as_tensor_variable(floatX(beta))
n = at.as_tensor_variable(intX(n))
return super().dist([n, alpha, beta], **kwargs)
def get_moment(rv, size, n, alpha, beta):
mean = at.round((n * alpha) / (alpha + beta))
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, n, alpha, beta):
r"""
Calculate log-probability of BetaBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, n)),
-np.inf,
binomln(n, value) + betaln(value + alpha, n - value + beta) - betaln(alpha, beta),
)
return check_parameters(res, n >= 0, alpha > 0, beta > 0, msg="n >= 0, alpha > 0, beta > 0")
def logcdf(value, n, alpha, beta):
"""
Compute the log of the cumulative distribution function for BetaBinomial distribution
at the specified value.
Parameters
----------
value: numeric
Value for which log CDF is calculated.
Returns
-------
TensorVariable
"""
# logcdf can only handle scalar values at the moment
if np.ndim(value):
raise TypeError(
f"BetaBinomial.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
)
safe_lower = at.switch(at.lt(value, 0), value, 0)
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, n),
at.logsumexp(
logp(
BetaBinomial.dist(alpha=alpha, beta=beta, n=n),
at.arange(safe_lower, value + 1),
),
keepdims=False,
),
0,
),
)
return check_parameters(res, 0 <= n, 0 < alpha, 0 < beta, msg="n >= 0, alpha > 0, beta > 0")
class Bernoulli(Discrete):
R"""Bernoulli log-likelihood
The Bernoulli distribution describes the probability of successes
(x=1) and failures (x=0).
The pmf of this distribution is
.. math:: f(x \mid p) = p^{x} (1-p)^{1-x}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = [0, 1]
for p in [0, 0.5, 0.8]:
pmf = st.bernoulli.pmf(x, p)
plt.plot(x, pmf, '-o', label='p = {}'.format(p))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=9)
plt.show()
======== ======================
Support :math:`x \in \{0, 1\}`
Mean :math:`p`
Variance :math:`p (1 - p)`
======== ======================
    The Bernoulli distribution can be parametrized either in terms of p or logit_p.
The link between the parametrizations is given by
.. math:: logit(p) = ln(\frac{p}{1-p})
Parameters
----------
p: float
Probability of success (0 < p < 1).
logit_p: float
Alternative log odds for the probability of success.
"""
rv_op = bernoulli
@classmethod
def dist(cls, p=None, logit_p=None, *args, **kwargs):
if p is not None and logit_p is not None:
raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")
elif p is None and logit_p is None:
raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")
if logit_p is not None:
p = at.sigmoid(logit_p)
p = at.as_tensor_variable(floatX(p))
return super().dist([p], **kwargs)
def get_moment(rv, size, p):
if not rv_size_is_none(size):
p = at.full(size, p)
return at.switch(p < 0.5, 0, 1)
def logp(value, p):
r"""
Calculate log-probability of Bernoulli distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, 1)),
-np.inf,
at.switch(value, at.log(p), at.log1p(-p)),
)
return check_parameters(res, p >= 0, p <= 1, msg="0 <= p <= 1")
def logcdf(value, p):
"""
Compute the log of the cumulative distribution function for Bernoulli distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, 1),
at.log1p(-p),
0,
),
)
return check_parameters(res, 0 <= p, p <= 1, msg="0 <= p <= 1")
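# A minimal sketch (not part of the library source) of the two Bernoulli
# parametrizations described above: p given directly, or logit_p, which the
# constructor maps through the sigmoid. The numeric values are illustrative.
def _example_bernoulli_parametrizations():
    p = 0.8
    logit_p = np.log(p / (1.0 - p))
    direct = pm.Bernoulli.dist(p=p)
    via_logit = pm.Bernoulli.dist(logit_p=logit_p)  # sigmoid(logit_p) == p
    return direct, via_logit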
class DiscreteWeibullRV(RandomVariable):
name = "discrete_weibull"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "int64"
_print_name = ("dWeibull", "\\operatorname{dWeibull}")
@classmethod
def rng_fn(cls, rng, q, beta, size):
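        # Inverse-transform sampling: draw p ~ Uniform(0, 1) and invert the
        # discrete Weibull CDF F(x) = 1 - q**((x + 1)**beta).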
p = rng.uniform(size=size)
return np.ceil(np.power(np.log(1 - p) / np.log(q), 1.0 / beta)) - 1
discrete_weibull = DiscreteWeibullRV()
class DiscreteWeibull(Discrete):
R"""Discrete Weibull log-likelihood
The discrete Weibull distribution is a flexible model of count data that
can handle both over- and under-dispersion.
The pmf of this distribution is
.. math:: f(x \mid q, \beta) = q^{x^{\beta}} - q^{(x + 1)^{\beta}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def DiscreteWeibull(q, b, x):
return q**(x**b) - q**((x + 1)**b)
x = np.arange(0, 10)
qs = [0.1, 0.9, 0.9]
betas = [0.3, 1.3, 3]
for q, b in zip(qs, betas):
pmf = DiscreteWeibull(q, b, x)
plt.plot(x, pmf, '-o', label=r'q = {}, $\beta$ = {}'.format(q, b))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=1)
plt.show()
======== ======================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\mu = \sum_{x = 1}^{\infty} q^{x^{\beta}}`
Variance :math:`2 \sum_{x = 1}^{\infty} x q^{x^{\beta}} - \mu - \mu^2`
======== ======================
"""
rv_op = discrete_weibull
@classmethod
def dist(cls, q, beta, *args, **kwargs):
q = at.as_tensor_variable(floatX(q))
beta = at.as_tensor_variable(floatX(beta))
return super().dist([q, beta], **kwargs)
def logp(value, q, beta):
r"""
Calculate log-probability of DiscreteWeibull distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log(at.power(q, at.power(value, beta)) - at.power(q, at.power(value + 1, beta))),
)
return check_parameters(res, 0 < q, q < 1, 0 < beta, msg="0 < q < 1, beta > 0")
def logcdf(value, q, beta):
"""
Compute the log of the cumulative distribution function for Discrete Weibull distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log1p(-at.power(q, at.power(value + 1, beta))),
)
return check_parameters(res, 0 < q, q < 1, 0 < beta, msg="0 < q < 1, beta > 0")
class Poisson(Discrete):
R"""
Poisson log-likelihood.
Often used to model the number of events occurring in a fixed period
of time when the times at which events occur are independent.
The pmf of this distribution is
.. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 15)
for m in [0.5, 3, 8]:
pmf = st.poisson.pmf(x, m)
plt.plot(x, pmf, '-o', label='$\mu$ = {}'.format(m))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\mu`
Variance :math:`\mu`
======== ==========================
Parameters
----------
mu: float
Expected number of occurrences during the given interval
(mu >= 0).
Notes
-----
The Poisson distribution can be derived as a limiting case of the
binomial distribution.
"""
rv_op = poisson
@classmethod
def dist(cls, mu, *args, **kwargs):
mu = at.as_tensor_variable(floatX(mu))
return super().dist([mu], *args, **kwargs)
def get_moment(rv, size, mu):
mu = at.floor(mu)
if not rv_size_is_none(size):
mu = at.full(size, mu)
return mu
def logp(value, mu):
r"""
Calculate log-probability of Poisson distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
logpow(mu, value) - factln(value) - mu,
)
log_prob = check_parameters(res, mu >= 0, msg="mu >= 0")
# Return zero when mu and value are both zero
return at.switch(at.eq(mu, 0) * at.eq(value, 0), 0, log_prob)
def logcdf(value, mu):
"""
Compute the log of the cumulative distribution function for Poisson distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
value = at.floor(value)
# Avoid C-assertion when the gammaincc function is called with invalid values (#4340)
safe_mu = at.switch(at.lt(mu, 0), 0, mu)
safe_value = at.switch(at.lt(value, 0), 0, value)
        res = at.switch(
            at.lt(value, 0),
            -np.inf,
            at.log(at.gammaincc(safe_value + 1, safe_mu)),
        )
return check_parameters(res, 0 <= mu, msg="mu >= 0")
class NegativeBinomial(Discrete):
R"""
Negative binomial log-likelihood.
The negative binomial distribution describes a Poisson random variable
whose rate parameter is gamma distributed.
The pmf of this distribution is
.. math::
f(x \mid \mu, \alpha) =
\binom{x + \alpha - 1}{x}
(\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def NegBinom(a, m, x):
pmf = special.binom(x + a - 1, x) * (a / (m + a))**a * (m / (m + a))**x
return pmf
x = np.arange(0, 22)
alphas = [0.9, 2, 4]
mus = [1, 2, 8]
for a, m in zip(alphas, mus):
pmf = NegBinom(a, m, x)
plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\mu$ = {}'.format(a, m))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\mu`
======== ==========================
The negative binomial distribution can be parametrized either in terms of mu or p,
and either in terms of alpha or n. The link between the parametrizations is given by
.. math::
\mu &= \frac{n(1-p)}{p} \\
\alpha &= n
Parameters
----------
mu: float
        Poisson distribution parameter (mu > 0).
alpha: float
Gamma distribution parameter (alpha > 0).
p: float
Alternative probability of success in each trial (0 < p < 1).
n: float
Alternative number of target success trials (n > 0)
"""
rv_op = nbinom
@classmethod
def dist(cls, mu=None, alpha=None, p=None, n=None, *args, **kwargs):
n, p = cls.get_n_p(mu=mu, alpha=alpha, p=p, n=n)
n = at.as_tensor_variable(floatX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([n, p], *args, **kwargs)
@classmethod
def get_n_p(cls, mu=None, alpha=None, p=None, n=None):
if n is None:
if alpha is not None:
n = alpha
else:
raise ValueError("Incompatible parametrization. Must specify either alpha or n.")
elif alpha is not None:
raise ValueError("Incompatible parametrization. Can't specify both alpha and n.")
if p is None:
if mu is not None:
p = n / (mu + n)
else:
raise ValueError("Incompatible parametrization. Must specify either mu or p.")
elif mu is not None:
raise ValueError("Incompatible parametrization. Can't specify both mu and p.")
return n, p
def get_moment(rv, size, n, p):
mu = at.floor(n * (1 - p) / p)
if not rv_size_is_none(size):
mu = at.full(size, mu)
return mu
def logp(value, n, p):
r"""
Calculate log-probability of NegativeBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
alpha = n
mu = alpha * (1 - p) / p
res = at.switch(
at.lt(value, 0),
-np.inf,
(
binomln(value + alpha - 1, value)
+ logpow(mu / (mu + alpha), value)
+ logpow(alpha / (mu + alpha), alpha)
),
)
negbinom = check_parameters(
res,
mu > 0,
alpha > 0,
msg="mu > 0, alpha > 0",
)
# Return Poisson when alpha gets very large.
return at.switch(at.gt(alpha, 1e10), logp(Poisson.dist(mu=mu), value), negbinom)
def logcdf(value, n, p):
"""
Compute the log of the cumulative distribution function for NegativeBinomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log(at.betainc(n, at.floor(value) + 1, p)),
)
return check_parameters(
res,
0 < n,
0 <= p,
p <= 1,
msg="0 < n, 0 <= p <= 1",
)
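# A minimal sketch (not part of the library source) of the parametrization link
# documented above for NegativeBinomial: mu = n * (1 - p) / p and alpha = n.
# The numeric values are illustrative.
def _example_negative_binomial_parametrizations():
    mu, alpha = 2.0, 4.0
    n = alpha
    p = n / (mu + n)  # the same conversion performed by NegativeBinomial.get_n_p
    via_mu_alpha = pm.NegativeBinomial.dist(mu=mu, alpha=alpha)
    via_n_p = pm.NegativeBinomial.dist(n=n, p=p)  # equivalent distribution
    return via_mu_alpha, via_n_p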
class Geometric(Discrete):
R"""
Geometric log-likelihood.
The probability that the first success in a sequence of Bernoulli
trials occurs on the x'th trial.
The pmf of this distribution is
.. math:: f(x \mid p) = p(1-p)^{x-1}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(1, 11)
for p in [0.1, 0.25, 0.75]:
pmf = st.geom.pmf(x, p)
plt.plot(x, pmf, '-o', label='p = {}'.format(p))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== =============================
Support :math:`x \in \mathbb{N}_{>0}`
Mean :math:`\dfrac{1}{p}`
Variance :math:`\dfrac{1 - p}{p^2}`
======== =============================
Parameters
----------
p: float
Probability of success on an individual trial (0 < p <= 1).
"""
rv_op = geometric
@classmethod
def dist(cls, p, *args, **kwargs):
p = at.as_tensor_variable(floatX(p))
return super().dist([p], *args, **kwargs)
def get_moment(rv, size, p):
mean = at.round(1.0 / p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, p):
r"""
Calculate log-probability of Geometric distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 1),
-np.inf,
at.log(p) + logpow(1 - p, value - 1),
)
return check_parameters(
res,
0 <= p,
p <= 1,
msg="0 <= p <= 1",
)
def logcdf(value, p):
"""
Compute the log of the cumulative distribution function for Geometric distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.log1mexp(at.log1p(-p) * value),
)
return check_parameters(
res,
0 <= p,
p <= 1,
msg="0 <= p <= 1",
)
class HyperGeometric(Discrete):
R"""
Discrete hypergeometric distribution.
    The probability of :math:`x` successes in a sequence of :math:`n` Bernoulli
trials taken without replacement from a population of :math:`N` objects,
containing :math:`k` good (or successful or Type I) objects.
The pmf of this distribution is
.. math:: f(x \mid N, n, k) = \frac{\binom{k}{x}\binom{N-k}{n-x}}{\binom{N}{n}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(1, 15)
N = 50
k = 10
for n in [20, 25]:
pmf = st.hypergeom.pmf(x, N, k, n)
            plt.plot(x, pmf, '-o', label='N = {}, k = {}, n = {}'.format(N, k, n))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== =============================
Support :math:`x \in \left[\max(0, n - N + k), \min(k, n)\right]`
Mean :math:`\dfrac{nk}{N}`
Variance :math:`\dfrac{(N-n)nk(N-k)}{(N-1)N^2}`
======== =============================
Parameters
----------
N : integer
Total size of the population
k : integer
Number of successful individuals in the population
n : integer
Number of samples drawn from the population
"""
rv_op = hypergeometric
@classmethod
def dist(cls, N, k, n, *args, **kwargs):
good = at.as_tensor_variable(intX(k))
bad = at.as_tensor_variable(intX(N - k))
n = at.as_tensor_variable(intX(n))
return super().dist([good, bad, n], *args, **kwargs)
def get_moment(rv, size, good, bad, n):
N, k = good + bad, good
mode = at.floor((n + 1) * (k + 1) / (N + 2))
if not rv_size_is_none(size):
mode = at.full(size, mode)
return mode
def logp(value, good, bad, n):
r"""
Calculate log-probability of HyperGeometric distribution at specified value.
Parameters
----------
value : numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
tot = good + bad
result = (
betaln(good + 1, 1)
+ betaln(bad + 1, 1)
+ betaln(tot - n + 1, n + 1)
- betaln(value + 1, good - value + 1)
- betaln(n - value + 1, bad - n + value + 1)
- betaln(tot + 1, 1)
)
# value in [max(0, n - N + k), min(k, n)]
lower = at.switch(at.gt(n - tot + good, 0), n - tot + good, 0)
upper = at.switch(at.lt(good, n), good, n)
res = at.switch(
at.lt(value, lower),
-np.inf,
at.switch(
at.le(value, upper),
result,
-np.inf,
),
)
return check_parameters(res, lower <= upper, msg="lower <= upper")
def logcdf(value, good, bad, n):
"""
Compute the log of the cumulative distribution function for HyperGeometric distribution
at the specified value.
Parameters
----------
value: numeric
Value for which log CDF is calculated.
Returns
-------
TensorVariable
"""
# logcdf can only handle scalar values at the moment
if np.ndim(value):
raise TypeError(
f"HyperGeometric.logcdf expects a scalar value but received a {np.ndim(value)}-dimensional object."
)
N = good + bad
        # TODO: Use the lower/upper support bounds in logcdf for a smarter logsumexp?
safe_lower = at.switch(at.lt(value, 0), value, 0)
res = at.switch(
at.lt(value, 0),
-np.inf,
at.switch(
at.lt(value, n),
at.logsumexp(
HyperGeometric.logp(at.arange(safe_lower, value + 1), good, bad, n),
keepdims=False,
),
0,
),
)
return check_parameters(
res,
0 < N,
0 <= good,
0 <= n,
good <= N,
n <= N,
msg="N > 0, 0 <= good <= N, 0 <= n <= N",
)
class DiscreteUniformRV(RandomVariable):
name = "discrete_uniform"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "int64"
_print_name = ("DiscreteUniform", "\\operatorname{DiscreteUniform}")
@classmethod
def rng_fn(cls, rng, lower, upper, size=None):
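        # stats.randint samples from [low, high) with an exclusive upper bound,
        # so upper + 1 keeps DiscreteUniform inclusive of `upper`.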
return stats.randint.rvs(lower, upper + 1, size=size, random_state=rng)
discrete_uniform = DiscreteUniformRV()
class DiscreteUniform(Discrete):
R"""
Discrete uniform distribution.
The pmf of this distribution is
.. math:: f(x \mid lower, upper) = \frac{1}{upper-lower+1}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
ls = [1, -2]
us = [6, 2]
for l, u in zip(ls, us):
x = np.arange(l, u+1)
pmf = [1.0 / (u - l + 1)] * len(x)
plt.plot(x, pmf, '-o', label='lower = {}, upper = {}'.format(l, u))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0, 0.4)
plt.legend(loc=1)
plt.show()
======== ===============================================
Support :math:`x \in {lower, lower + 1, \ldots, upper}`
Mean :math:`\dfrac{lower + upper}{2}`
Variance :math:`\dfrac{(upper - lower)^2}{12}`
======== ===============================================
Parameters
----------
lower: int
Lower limit.
upper: int
Upper limit (upper > lower).
"""
rv_op = discrete_uniform
@classmethod
def dist(cls, lower, upper, *args, **kwargs):
lower = intX(at.floor(lower))
upper = intX(at.floor(upper))
return super().dist([lower, upper], **kwargs)
def get_moment(rv, size, lower, upper):
mode = at.maximum(at.floor((upper + lower) / 2.0), lower)
if not rv_size_is_none(size):
mode = at.full(size, mode)
return mode
def logp(value, lower, upper):
r"""
Calculate log-probability of DiscreteUniform distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, lower), at.gt(value, upper)),
-np.inf,
at.fill(value, -at.log(upper - lower + 1)),
)
return check_parameters(res, lower <= upper, msg="lower <= upper")
def logcdf(value, lower, upper):
"""
Compute the log of the cumulative distribution function for Discrete uniform distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
values are desired the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.le(value, lower),
-np.inf,
at.switch(
at.lt(value, upper),
at.log(at.minimum(at.floor(value), upper) - lower + 1) - at.log(upper - lower + 1),
0,
),
)
return check_parameters(res, lower <= upper, msg="lower <= upper")
class Categorical(Discrete):
R"""
Categorical log-likelihood.
The most general discrete distribution. The pmf of this distribution is
.. math:: f(x \mid p) = p_x
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
ps = [[0.1, 0.6, 0.3], [0.3, 0.1, 0.1, 0.5]]
for p in ps:
x = range(len(p))
plt.plot(x, p, '-o', label='p = {}'.format(p))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.ylim(0)
plt.legend(loc=1)
plt.show()
======== ===================================
Support :math:`x \in \{0, 1, \ldots, |p|-1\}`
======== ===================================
Parameters
----------
p: array of floats
        p > 0, and the elements of p are expected to sum to 1; if they do not,
        they are rescaled automatically.
"""
rv_op = categorical
@classmethod
def dist(cls, p, **kwargs):
p = at.as_tensor_variable(floatX(p))
return super().dist([p], **kwargs)
def get_moment(rv, size, p):
mode = at.argmax(p, axis=-1)
if not rv_size_is_none(size):
mode = at.full(size, mode)
return mode
def logp(value, p):
r"""
Calculate log-probability of Categorical distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or `TensorVariable`
"""
k = at.shape(p)[-1]
p_ = p
p = p_ / at.sum(p_, axis=-1, keepdims=True)
value_clip = at.clip(value, 0, k - 1)
if p.ndim > 1:
if p.ndim > value_clip.ndim:
value_clip = at.shape_padleft(value_clip, p_.ndim - value_clip.ndim)
elif p.ndim < value_clip.ndim:
p = at.shape_padleft(p, value_clip.ndim - p_.ndim)
pattern = (p.ndim - 1,) + tuple(range(p.ndim - 1))
a = at.log(
take_along_axis(
p.dimshuffle(pattern),
value_clip,
)
)
else:
a = at.log(p[value_clip])
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, k - 1)),
-np.inf,
a,
)
return check_parameters(
            res, at.all(p_ >= 0, axis=-1), at.all(p <= 1, axis=-1), msg="0 <= p <= 1"
)
class ConstantRV(RandomVariable):
name = "constant"
ndim_supp = 0
ndims_params = [0]
dtype = "floatX" # Should be treated as a discrete variable!
_print_name = ("Constant", "\\operatorname{Constant}")
@classmethod
def rng_fn(cls, rng, c, size=None):
if size is None:
return c.copy()
return np.full(size, c)
constant = ConstantRV()
class Constant(Discrete):
r"""
Constant log-likelihood.
Parameters
----------
value: float or int
Constant parameter.
"""
rv_op = constant
@classmethod
def dist(cls, c, *args, **kwargs):
c = at.as_tensor_variable(floatX(c))
return super().dist([c], **kwargs)
def get_moment(rv, size, c):
if not rv_size_is_none(size):
c = at.full(size, c)
return c
def logp(value, c):
r"""
Calculate log-probability of Constant distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
return at.switch(
at.eq(value, c),
at.zeros_like(value),
-np.inf,
)
class ZeroInflatedPoissonRV(RandomVariable):
name = "zero_inflated_poisson"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "int64"
_print_name = ("ZeroInflatedPois", "\\operatorname{ZeroInflatedPois}")
@classmethod
def rng_fn(cls, rng, psi, lam, size):
return rng.poisson(lam, size=size) * (rng.random(size=size) < psi)
zero_inflated_poisson = ZeroInflatedPoissonRV()
class ZeroInflatedPoisson(Discrete):
R"""
Zero-inflated Poisson log-likelihood.
Often used to model the number of events occurring in a fixed period
of time when the times at which events occur are independent.
The pmf of this distribution is
.. math::
f(x \mid \psi, \theta) = \left\{ \begin{array}{l}
(1-\psi) + \psi e^{-\theta}, \text{if } x = 0 \\
\psi \frac{e^{-\theta}\theta^x}{x!}, \text{if } x=1,2,3,\ldots
\end{array} \right.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 22)
psis = [0.7, 0.4]
thetas = [8, 4]
for psi, theta in zip(psis, thetas):
pmf = st.poisson.pmf(x, theta)
pmf[0] = (1 - psi) + pmf[0]
pmf[1:] = psi * pmf[1:]
pmf /= pmf.sum()
plt.plot(x, pmf, '-o', label='$\\psi$ = {}, $\\theta$ = {}'.format(psi, theta))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi\theta`
    Variance  :math:`\psi\theta (1 + \theta (1 - \psi))`
======== ==========================
Parameters
----------
psi: float
Expected proportion of Poisson variates (0 < psi < 1)
theta: float
Expected number of occurrences during the given interval
(theta >= 0).
"""
rv_op = zero_inflated_poisson
@classmethod
def dist(cls, psi, theta, *args, **kwargs):
psi = at.as_tensor_variable(floatX(psi))
theta = at.as_tensor_variable(floatX(theta))
return super().dist([psi, theta], *args, **kwargs)
def get_moment(rv, size, psi, theta):
mean = at.floor(psi * theta)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, psi, theta):
r"""
Calculate log-probability of ZeroInflatedPoisson distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.gt(value, 0),
at.log(psi) + logp(Poisson.dist(mu=theta), value),
at.logaddexp(at.log1p(-psi), at.log(psi) - theta),
)
res = at.switch(at.lt(value, 0), -np.inf, res)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 <= theta,
msg="0 <= psi <= 1, theta >= 0",
)
def logcdf(value, psi, theta):
"""
Compute the log of the cumulative distribution function for ZeroInflatedPoisson distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
        values is desired, the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.logaddexp(
at.log1p(-psi),
at.log(psi) + logcdf(Poisson.dist(mu=theta), value),
),
)
return check_parameters(
res, 0 <= psi, psi <= 1, 0 <= theta, msg="0 <= psi <= 1, theta >= 0"
)
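# Worked example for the mixture above (editor's sketch; numbers are only
# illustrative): with psi = 0.7 and theta = 2.0,
#   P(X = 0) = (1 - 0.7) + 0.7 * exp(-2.0)        ~= 0.3947
#   P(X = k) = 0.7 * exp(-2.0) * 2.0**k / k!      for k >= 1
# and the mean and variance of this zero-inflated mixture work out to
#   E[X]   = psi * theta                          = 1.4
#   Var[X] = psi * theta * (1 + theta * (1-psi))  = 2.24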
class ZeroInflatedBinomialRV(RandomVariable):
name = "zero_inflated_binomial"
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = "int64"
_print_name = ("ZeroInflatedBinom", "\\operatorname{ZeroInflatedBinom}")
@classmethod
def rng_fn(cls, rng, psi, n, p, size):
return rng.binomial(n=n, p=p, size=size) * (rng.random(size=size) < psi)
zero_inflated_binomial = ZeroInflatedBinomialRV()
class ZeroInflatedBinomial(Discrete):
R"""
Zero-inflated Binomial log-likelihood.
The pmf of this distribution is
.. math::
f(x \mid \psi, n, p) = \left\{ \begin{array}{l}
(1-\psi) + \psi (1-p)^{n}, \text{if } x = 0 \\
\psi {n \choose x} p^x (1-p)^{n-x}, \text{if } x=1,2,3,\ldots,n
\end{array} \right.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import arviz as az
plt.style.use('arviz-darkgrid')
x = np.arange(0, 25)
ns = [10, 20]
ps = [0.5, 0.7]
psis = [0.7, 0.4]
for n, p, psi in zip(ns, ps, psis):
pmf = st.binom.pmf(x, n, p)
pmf[0] = (1 - psi) + pmf[0]
pmf[1:] = psi * pmf[1:]
pmf /= pmf.sum()
plt.plot(x, pmf, '-o', label='n = {}, p = {}, $\\psi$ = {}'.format(n, p, psi))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi n p`
    Variance  :math:`\psi n p (1-p) + \psi (1 - \psi) n^2 p^2`
======== ==========================
Parameters
----------
psi: float
Expected proportion of Binomial variates (0 < psi < 1)
n: int
Number of Bernoulli trials (n >= 0).
p: float
Probability of success in each trial (0 < p < 1).
"""
rv_op = zero_inflated_binomial
@classmethod
def dist(cls, psi, n, p, *args, **kwargs):
psi = at.as_tensor_variable(floatX(psi))
n = at.as_tensor_variable(intX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([psi, n, p], *args, **kwargs)
def get_moment(rv, size, psi, n, p):
mean = at.round(psi * n * p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, psi, n, p):
r"""
Calculate log-probability of ZeroInflatedBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.gt(value, 0),
at.log(psi) + logp(Binomial.dist(n=n, p=p), value),
at.logaddexp(at.log1p(-psi), at.log(psi) + n * at.log1p(-p)),
)
res = at.switch(
at.lt(value, 0),
-np.inf,
res,
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 <= p,
p <= 1,
msg="0 <= psi <= 1, 0 <= p <= 1",
)
def logcdf(value, psi, n, p):
"""
Compute the log of the cumulative distribution function for ZeroInflatedBinomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
        values is desired, the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.or_(at.lt(value, 0), at.gt(value, n)),
-np.inf,
at.logaddexp(
at.log1p(-psi),
at.log(psi) + logcdf(Binomial.dist(n=n, p=p), value),
),
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 <= p,
p <= 1,
msg="0 <= psi <= 1, 0 <= p <= 1",
)
class ZeroInflatedNegBinomialRV(RandomVariable):
name = "zero_inflated_neg_binomial"
ndim_supp = 0
ndims_params = [0, 0, 0]
dtype = "int64"
_print_name = (
"ZeroInflatedNegBinom",
"\\operatorname{ZeroInflatedNegBinom}",
)
@classmethod
def rng_fn(cls, rng, psi, n, p, size):
return rng.negative_binomial(n=n, p=p, size=size) * (rng.random(size=size) < psi)
zero_inflated_neg_binomial = ZeroInflatedNegBinomialRV()
class ZeroInflatedNegativeBinomial(Discrete):
R"""
Zero-Inflated Negative binomial log-likelihood.
The Zero-inflated version of the Negative Binomial (NB).
The NB distribution describes a Poisson random variable
whose rate parameter is gamma distributed.
The pmf of this distribution is
.. math::
f(x \mid \psi, \mu, \alpha) = \left\{
\begin{array}{l}
(1-\psi) + \psi \left (
\frac{\alpha}{\alpha+\mu}
\right) ^\alpha, \text{if } x = 0 \\
\psi \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} \left (
\frac{\alpha}{\mu+\alpha}
\right)^\alpha \left(
\frac{\mu}{\mu+\alpha}
\right)^x, \text{if } x=1,2,3,\ldots
\end{array}
\right.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from scipy import special
import arviz as az
plt.style.use('arviz-darkgrid')
def ZeroInfNegBinom(a, m, psi, x):
pmf = special.binom(x + a - 1, x) * (a / (m + a))**a * (m / (m + a))**x
pmf[0] = (1 - psi) + pmf[0]
pmf[1:] = psi * pmf[1:]
pmf /= pmf.sum()
return pmf
x = np.arange(0, 25)
alphas = [2, 4]
mus = [2, 8]
psis = [0.7, 0.7]
for a, m, psi in zip(alphas, mus, psis):
pmf = ZeroInfNegBinom(a, m, psi, x)
plt.plot(x, pmf, '-o', label=r'$\alpha$ = {}, $\mu$ = {}, $\psi$ = {}'.format(a, m, psi))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi\mu`
    Var       :math:`\psi\mu \left(1 + \frac{\mu}{\alpha} + \mu (1 - \psi)\right)`
======== ==========================
The zero inflated negative binomial distribution can be parametrized
either in terms of mu or p, and either in terms of alpha or n.
The link between the parametrizations is given by
.. math::
\mu &= \frac{n(1-p)}{p} \\
\alpha &= n
Parameters
----------
psi: float
Expected proportion of NegativeBinomial variates (0 < psi < 1)
mu: float
        Poisson distribution parameter (mu > 0).
alpha: float
Gamma distribution parameter (alpha > 0).
p: float
Alternative probability of success in each trial (0 < p < 1).
n: float
Alternative number of target success trials (n > 0)
"""
rv_op = zero_inflated_neg_binomial
@classmethod
def dist(cls, psi, mu=None, alpha=None, p=None, n=None, *args, **kwargs):
psi = at.as_tensor_variable(floatX(psi))
n, p = NegativeBinomial.get_n_p(mu=mu, alpha=alpha, p=p, n=n)
n = at.as_tensor_variable(floatX(n))
p = at.as_tensor_variable(floatX(p))
return super().dist([psi, n, p], *args, **kwargs)
def get_moment(rv, size, psi, n, p):
mean = at.floor(psi * n * (1 - p) / p)
if not rv_size_is_none(size):
mean = at.full(size, mean)
return mean
def logp(value, psi, n, p):
r"""
Calculate log-probability of ZeroInflatedNegativeBinomial distribution at specified value.
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or Aesara tensor
Returns
-------
TensorVariable
"""
res = at.switch(
at.gt(value, 0),
at.log(psi) + logp(NegativeBinomial.dist(n=n, p=p), value),
at.logaddexp(at.log1p(-psi), at.log(psi) + n * at.log(p)),
)
res = at.switch(
at.lt(value, 0),
-np.inf,
res,
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 < n,
0 <= p,
p <= 1,
msg="0 <= psi <= 1, n > 0, 0 <= p <= 1",
)
def logcdf(value, psi, n, p):
"""
Compute the log of the cumulative distribution function for ZeroInflatedNegativeBinomial distribution
at the specified value.
Parameters
----------
value: numeric or np.ndarray or aesara.tensor
Value(s) for which log CDF is calculated. If the log CDF for multiple
        values is desired, the values must be provided in a numpy array or Aesara tensor.
Returns
-------
TensorVariable
"""
res = at.switch(
at.lt(value, 0),
-np.inf,
at.logaddexp(
at.log1p(-psi),
at.log(psi) + logcdf(NegativeBinomial.dist(n=n, p=p), value),
),
)
return check_parameters(
res,
0 <= psi,
psi <= 1,
0 < n,
0 < p,
p <= 1,
msg="0 <= psi <= 1, n > 0, 0 < p <= 1",
)
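# Editor's note (illustrative): the (mu, alpha) and (n, p) parametrizations in
# the docstring are interchangeable via mu = n * (1 - p) / p and alpha = n.
# For instance, n = 5 and p = 0.5 correspond to alpha = 5 and
# mu = 5 * 0.5 / 0.5 = 5, which is exactly the conversion that
# NegativeBinomial.get_n_p performs inside ``dist`` above.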
class _OrderedLogistic(Categorical):
r"""
Underlying class for ordered logistic distributions.
See docs for the OrderedLogistic wrapper class for more details on how to use it in models.
"""
rv_op = categorical
@classmethod
def dist(cls, eta, cutpoints, *args, **kwargs):
eta = at.as_tensor_variable(floatX(eta))
cutpoints = at.as_tensor_variable(cutpoints)
pa = sigmoid(cutpoints - at.shape_padright(eta))
p_cum = at.concatenate(
[
at.zeros_like(at.shape_padright(pa[..., 0])),
pa,
at.ones_like(at.shape_padright(pa[..., 0])),
],
axis=-1,
)
p = p_cum[..., 1:] - p_cum[..., :-1]
return super().dist(p, *args, **kwargs)
class OrderedLogistic:
R"""
Wrapper class for Ordered Logistic distributions.
    Useful for regression on ordinal data whose values range
from 1 to K as a function of some predictor, :math:`\eta`. The
cutpoints, :math:`c`, separate which ranges of :math:`\eta` are
mapped to which of the K observed dependent variables. The number
of cutpoints is K - 1. It is recommended that the cutpoints are
constrained to be ordered.
.. math::
f(k \mid \eta, c) = \left\{
\begin{array}{l}
1 - \text{logit}^{-1}(\eta - c_1)
\,, \text{if } k = 0 \\
\text{logit}^{-1}(\eta - c_{k - 1}) -
\text{logit}^{-1}(\eta - c_{k})
\,, \text{if } 0 < k < K \\
\text{logit}^{-1}(\eta - c_{K - 1})
\,, \text{if } k = K \\
\end{array}
\right.
Parameters
----------
eta: float
The predictor.
cutpoints: array
The length K - 1 array of cutpoints which break :math:`\eta` into
ranges. Do not explicitly set the first and last elements of
:math:`c` to negative and positive infinity.
compute_p: boolean, default True
        Whether to compute and store in the trace the inferred probabilities of each category,
        based on the cutpoints' values. Defaults to True.
        Might be useful to disable it if memory usage is a concern.
Examples
--------
.. code-block:: python
# Generate data for a simple 1 dimensional example problem
n1_c = 300; n2_c = 300; n3_c = 300
cluster1 = np.random.randn(n1_c) + -1
cluster2 = np.random.randn(n2_c) + 0
cluster3 = np.random.randn(n3_c) + 2
x = np.concatenate((cluster1, cluster2, cluster3))
y = np.concatenate((1*np.ones(n1_c),
2*np.ones(n2_c),
3*np.ones(n3_c))) - 1
# Ordered logistic regression
with pm.Model() as model:
cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
transform=pm.distributions.transforms.ordered)
y_ = pm.OrderedLogistic("y", cutpoints=cutpoints, eta=x, observed=y)
idata = pm.sample()
# Plot the results
plt.hist(cluster1, 30, alpha=0.5);
plt.hist(cluster2, 30, alpha=0.5);
plt.hist(cluster3, 30, alpha=0.5);
posterior = idata.posterior.stack(sample=("chain", "draw"))
plt.hist(posterior["cutpoints"][0], 80, alpha=0.2, color='k');
plt.hist(posterior["cutpoints"][1], 80, alpha=0.2, color='k');
"""
def __new__(cls, name, *args, compute_p=True, **kwargs):
out_rv = _OrderedLogistic(name, *args, **kwargs)
if compute_p:
pm.Deterministic(f"{name}_probs", out_rv.owner.inputs[3], dims=kwargs.get("dims"))
return out_rv
@classmethod
def dist(cls, *args, **kwargs):
return _OrderedLogistic.dist(*args, **kwargs)
class _OrderedProbit(Categorical):
r"""
Underlying class for ordered probit distributions.
See docs for the OrderedProbit wrapper class for more details on how to use it in models.
"""
rv_op = categorical
@classmethod
def dist(cls, eta, cutpoints, sigma=1, *args, **kwargs):
eta = at.as_tensor_variable(floatX(eta))
cutpoints = at.as_tensor_variable(cutpoints)
probits = at.shape_padright(eta) - cutpoints
_log_p = at.concatenate(
[
at.shape_padright(normal_lccdf(0, sigma, probits[..., 0])),
log_diff_normal_cdf(0, sigma, probits[..., :-1], probits[..., 1:]),
at.shape_padright(normal_lcdf(0, sigma, probits[..., -1])),
],
axis=-1,
)
_log_p = at.as_tensor_variable(floatX(_log_p))
p = at.exp(_log_p)
return super().dist(p, *args, **kwargs)
class OrderedProbit:
R"""
Wrapper class for Ordered Probit distributions.
    Useful for regression on ordinal data whose values range
from 1 to K as a function of some predictor, :math:`\eta`. The
cutpoints, :math:`c`, separate which ranges of :math:`\eta` are
mapped to which of the K observed dependent variables. The number
of cutpoints is K - 1. It is recommended that the cutpoints are
constrained to be ordered.
In order to stabilize the computation, log-likelihood is computed
in log space using the scaled error function `erfcx`.
.. math::
f(k \mid \eta, c) = \left\{
\begin{array}{l}
1 - \text{normal_cdf}(0, \sigma, \eta - c_1)
\,, \text{if } k = 0 \\
\text{normal_cdf}(0, \sigma, \eta - c_{k - 1}) -
\text{normal_cdf}(0, \sigma, \eta - c_{k})
\,, \text{if } 0 < k < K \\
\text{normal_cdf}(0, \sigma, \eta - c_{K - 1})
\,, \text{if } k = K \\
\end{array}
\right.
Parameters
----------
eta: float
The predictor.
cutpoints: array
The length K - 1 array of cutpoints which break :math:`\eta` into
ranges. Do not explicitly set the first and last elements of
:math:`c` to negative and positive infinity.
sigma: float, default 1.0
Standard deviation of the probit function.
compute_p: boolean, default True
        Whether to compute and store in the trace the inferred probabilities of each category,
        based on the cutpoints' values. Defaults to True.
        Might be useful to disable it if memory usage is a concern.
Example
--------
.. code:: python
# Generate data for a simple 1 dimensional example problem
n1_c = 300; n2_c = 300; n3_c = 300
cluster1 = np.random.randn(n1_c) + -1
cluster2 = np.random.randn(n2_c) + 0
cluster3 = np.random.randn(n3_c) + 2
x = np.concatenate((cluster1, cluster2, cluster3))
y = np.concatenate((1*np.ones(n1_c),
2*np.ones(n2_c),
3*np.ones(n3_c))) - 1
# Ordered probit regression
with pm.Model() as model:
cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
transform=pm.distributions.transforms.ordered)
y_ = pm.OrderedProbit("y", cutpoints=cutpoints, eta=x, observed=y)
idata = pm.sample()
# Plot the results
plt.hist(cluster1, 30, alpha=0.5);
plt.hist(cluster2, 30, alpha=0.5);
plt.hist(cluster3, 30, alpha=0.5);
posterior = idata.posterior.stack(sample=("chain", "draw"))
plt.hist(posterior["cutpoints"][0], 80, alpha=0.2, color='k');
plt.hist(posterior["cutpoints"][1], 80, alpha=0.2, color='k');
"""
def __new__(cls, name, *args, compute_p=True, **kwargs):
out_rv = _OrderedProbit(name, *args, **kwargs)
if compute_p:
pm.Deterministic(f"{name}_probs", out_rv.owner.inputs[3], dims=kwargs.get("dims"))
return out_rv
@classmethod
def dist(cls, *args, **kwargs):
return _OrderedProbit.dist(*args, **kwargs)
|
py | 1a49017f661695700bfc8d9ce6a75c8ed2feaf0d | #!/usr/bin/env python3
# Complete the maxSubsetSum function below.
def maxSubsetSum(arr):
if len(arr) == 0: return 0
if len(arr) == 1: return arr[0]
arr[0] = max(0, arr[0])
arr[1] = max(arr[0], arr[1])
for i in range(2, len(arr)):
arr[i] = max(arr[i - 1], arr[i] + arr[i - 2])
return arr[-1]
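# Quick sanity check (editor's addition): the recurrence stores at index i the
# best non-adjacent sum over elements up to i, so for [3, 7, 4, 6, 5] the
# answer is 7 + 6 = 13.
assert maxSubsetSum([3, 7, 4, 6, 5]) == 13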
if __name__ == "__main__":
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = maxSubsetSum(arr)
print(res)
|
py | 1a4901bd1956438ce241b15f3200ebfacf8ea837 | # Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common string utilities used by various parts of core."""
import re
def camel_case_split(identifier: str) -> str:
"""Split camel case string."""
regex = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
matches = re.finditer(regex, identifier)
return ' '.join([m.group(0) for m in matches])
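# Example (editor's addition): the lookahead/lookbehind pattern splits both
# lowercase->uppercase boundaries and acronym boundaries, e.g.
#   camel_case_split('MyHTTPServer') -> 'My HTTP Server'
if __name__ == '__main__':
    print(camel_case_split('MyHTTPServer'))  # My HTTP Server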
|
py | 1a49026242918a67084d33ddb05f3444d38e529a | lista = []
jogador = dict()
golslist = []
total = 0
while True:
    jogador.clear()
    golslist.clear()
    total = 0  # reset per player, otherwise goals accumulate across players
    jogador['nome'] = str(input('Player name: '))
    jogador['partidas'] = int(input(f'How many matches did {jogador["nome"]} play? '))
    for i in range(0, jogador['partidas']):
        gol = int(input(f'How many goals in match {i}? '))
        golslist.append(gol)
        total += gol
    jogador['gols'] = golslist[:]
    jogador['total'] = total
    lista.append(jogador.copy())
    continuar = str(input('Do you want to continue? [Y/N] '))
    if continuar in 'Nn':
        break
print('-='*20)
for i in jogador.keys():
print(f'{i:<12}', end='')
print()
print('-'*40)
for k, v in enumerate(lista):
print(f'{k:<4} ', end='')
for d in v.values():
print(f'{str(d):<10}', end='')
print()
while True:
    a = int(input('Show data for which player? (999 to quit) '))
    if a == 999:
        print('<< COME BACK ANY TIME >>')
        break
    elif a >= len(lista):
        print('ERROR, enter a valid number')
    else:
        print(f'-- SUMMARY FOR PLAYER {lista[a]["nome"]}.')
        for p in range(0, lista[a]['partidas']):
            print(f'In match {p} they scored {lista[a]["gols"][p]} goals.')
    print('-'*30)
|
py | 1a49026e8768d8e9d0e474bc26a2975f3048d449 | from distutils.core import setup
import setuptools
setup(
name='regcensus',
version='0.2.4',
description='Python package for accessing data from the QuantGov API',
url='https://github.com/QuantGov/regcensus-api-python',
author='QuantGov',
author_email='[email protected]',
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'pandas',
'requests'
],
)
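# Editor's note (assumption: a standard setuptools workflow, not documented in
# this repository): for local development the package would typically be
# installed with
#   pip install -e .
# and a source distribution built with
#   python setup.py sdist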
|
py | 1a4905ff03f1b5a83163458cfcf12f106c78dbfe | # flake8: noqa
""""
The data module contains exactly what you'd expect: everything related to data
processing, collection, and storage.
"""
from .database import db
from .database import cache
from .database import celery_app
db.metadata.clear()
# SqlAlchemy database models
from .boathouses import Boathouse
from .live_website_options import LiveWebsiteOptions
# External data calls
from . import usgs
from . import hobolink
|
py | 1a49074fc15c0ef8b594d3cb4afe30348af408c8 | import curses
import time
from sudoku import Sudoku
menu = ["Rules", "Play", "Exit"]
submenu = ["Easy", "Hard", "Exit"]
def intro_message():
welcome_message = """
Welcome to Sudoku
Rules:
    All rows should have the digits 1-9, without repetition.
    All columns should have the digits 1-9, without repetition.
    All 9 sub-matrices should have the digits 1-9, without repetition.
    To play, enter the row, column, and answer at the command prompt. The
    format is: <row> <column> <value>
Type exit to leave
Please note this game uses 0 indexing
Good luck!\n
"""
return welcome_message
def print_subject(stdscr, w, text):
text_x = w // 2 - len(text) // 2
stdscr.addstr(5, text_x, text)
def print_menu(stdscr, curr_row, curr_menu, text):
stdscr.clear()
h, w = stdscr.getmaxyx()
title_x = w // 2 - len(text) // 2
stdscr.addstr(5, title_x, text)
for idx, row in enumerate(curr_menu):
x = w // 2 - len(row) // 2
y = h // 2 - len(menu) // 2 + idx
if idx == curr_row:
stdscr.attron(curses.color_pair(1))
stdscr.addstr(y, x, row)
stdscr.attroff(curses.color_pair(1))
else:
stdscr.addstr(y, x, row)
stdscr.refresh()
def print_center(stdscr, text):
stdscr.clear()
h, w = stdscr.getmaxyx()
x = w // 2 - len(text) // 2
y = h // 2
stdscr.addstr(y, x, text)
stdscr.refresh()
def sub_menu(stdscr):
submenu_row = 0
print_menu(stdscr, submenu_row, submenu, "Pick a Difficulty")
while True:
sub_key = stdscr.getch()
if sub_key == curses.KEY_UP and submenu_row > 0:
submenu_row -= 1
elif sub_key == curses.KEY_DOWN and submenu_row < len(submenu) - 1:
submenu_row += 1
        if sub_key in [10, 13]:  # Enter key (LF or CR)
if submenu[submenu_row] == "Easy":
print_center(stdscr, "'{}' selected".format(submenu[submenu_row]))
start_game(submenu[submenu_row])
elif submenu[submenu_row] == "Hard":
print_center(stdscr, "'{}' selected".format(submenu[submenu_row]))
start_game(submenu[submenu_row])
elif submenu[submenu_row] == "Exit":
print_center(stdscr, "'{}' selected".format(submenu[submenu_row]))
return
print_menu(stdscr, submenu_row, submenu, "Pick a Difficulty")
def start_game(difficulty):
time.sleep(1)
curses.nocbreak()
curses.echo()
curses.endwin()
Sudoku.run(difficulty)
def main(stdscr):
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
current_row = 0
print_menu(stdscr, current_row, menu, "Sudoku!")
while True:
key = stdscr.getch()
if key == curses.KEY_UP and current_row > 0:
current_row -= 1
elif key == curses.KEY_DOWN and current_row < len(menu) - 1:
current_row += 1
        elif key in [10, 13]:  # Enter key (LF or CR)
if menu[current_row] == "Rules":
stdscr.addstr(5, 5, intro_message())
stdscr.getch()
elif menu[current_row] == "Play":
sub_menu(stdscr)
elif menu[current_row] != "Exit":
print_center(stdscr, "'{}' selected".format(menu[current_row]))
stdscr.getch()
if current_row == len(menu) - 1:
break
print_menu(stdscr, current_row, menu, "Sudoku!")
if __name__ == "__main__":
curses.wrapper(main)
|
py | 1a490848fca405f3c5f6337fc58864c1b68fca1a | from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from guardian.shortcuts import assign_perm, remove_perm
from grandchallenge.cases.models import Image
from grandchallenge.reader_studies.models import Answer, ReaderStudy
from grandchallenge.reader_studies.tasks import add_scores
@receiver(m2m_changed, sender=ReaderStudy.images.through)
def update_image_permissions(instance, action, reverse, model, pk_set, **_):
"""
    Assign or remove view permissions to the readers group when images
    are added or removed to/from the reader study images. Handles reverse
relations and clearing.
"""
if action not in ["post_add", "post_remove", "pre_clear"]:
# nothing to do for the other actions
return
if reverse:
images = Image.objects.filter(pk=instance.pk)
if pk_set is None:
# When using a _clear action, pk_set is None
# https://docs.djangoproject.com/en/2.2/ref/signals/#m2m-changed
reader_studies = instance.readerstudies.all()
else:
reader_studies = model.objects.filter(pk__in=pk_set)
reader_studies = reader_studies.select_related("readers_group")
else:
reader_studies = [instance]
if pk_set is None:
# When using a _clear action, pk_set is None
# https://docs.djangoproject.com/en/2.2/ref/signals/#m2m-changed
images = instance.images.all()
else:
images = model.objects.filter(pk__in=pk_set)
op = assign_perm if "add" in action else remove_perm
for rs in reader_studies:
op("view_image", rs.readers_group, images)
@receiver(m2m_changed, sender=Answer.images.through)
def assign_score(instance, action, reverse, model, pk_set, **_):
if action != "post_add":
return
add_scores.apply_async(
kwargs={
"instance_pk": str(instance.pk),
"pk_set": list(map(str, pk_set)),
}
)
|
py | 1a49095bb8389d7ead750a83529cfb85ac62a63d | import os
import yaml
import collections
import logging
log = logging.getLogger(__name__)
# Recursive dictionary merge
# Copyright (C) 2016 Paul Durivage <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def dict_merge(dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts
nested to an arbitrary depth, updating keys. The ``merge_dct`` is
merged into ``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
if (
k in dct
and isinstance(dct[k], dict)
            and isinstance(merge_dct[k], collections.abc.Mapping)
):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
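# Usage sketch (editor's addition): unlike dict.update, nested keys are merged
# rather than overwritten wholesale, e.g.
#
#     base = {"db": {"host": "localhost", "port": 5432}, "debug": False}
#     dict_merge(base, {"db": {"port": 5433}})
#     # base == {"db": {"host": "localhost", "port": 5433}, "debug": False}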
class ConfigClass(dict):
""" This wrapper class allows easy loading and overloading variables of our
configuration
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
    def load_yaml(self, *args):
        with open(os.path.join(*args)) as o:
            d = yaml.load(o.read(), Loader=yaml.SafeLoader)
        dict_merge(self, d)
# load defaults
config = ConfigClass()
config.load_yaml(os.path.dirname(os.path.realpath(__file__)), "config-defaults.yaml")
try:
config.load_yaml(os.getcwd(), "config.yaml")
except Exception: # pragma: no cover
log.info("No config.yaml found in root directory! Using defaults ...")
|
py | 1a4909956af46331d992acfbb6ac8f428c013751 | """."""
import os
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
SCRIPT_ABS_DIR = os.sys.path[0]
os.sys.path.append(os.path.join(SCRIPT_ABS_DIR, '../../config'))
import my_py_picture as config # noqa
figure_width = config.MAX_FULL_PAGE_WIDTH * 0.8
FIGSIZE = (figure_width, figure_width * 0.3)
DPI = config.DPI_GRAPH
LAYOUT = dict(pad=0, h_pad=0, w_pad=2, rect=(0, 0, 1, 1))
PLOTS_WIDTH_RATIOS = [0.6, 1]
LABEL_OPTIONS = dict(config.LABEL_OPTIONS, x=0.03, y=1)
X_LABEL = 'Cuteness'
Y_LABEL = 'Excitement'
def main():
"""."""
x_a = np.linspace(0, 10, num=10)
y_a = [0] + [1] * 9
x_b = np.linspace(0, 10, num=10)
y_b = np.linspace(0, 10, num=10)
fig = plt.figure(figsize=FIGSIZE, dpi=DPI)
gs = gridspec.GridSpec(1, 2, width_ratios=PLOTS_WIDTH_RATIOS)
ax_a = fig.add_subplot(gs[0])
ax_a.plot(x_a, y_a)
ax_a.set_xlabel(X_LABEL)
ax_a.set_ylabel(Y_LABEL)
ax_a.text(s=r'\textbf{a}', transform=ax_a.transAxes, **LABEL_OPTIONS)
ax_b = fig.add_subplot(gs[1])
ax_b.plot(x_b, y_b)
ax_b.set_xlabel(X_LABEL)
ax_b.set_ylabel(Y_LABEL)
ax_b.text(s=r'\textbf{b}', transform=ax_b.transAxes, **LABEL_OPTIONS)
fig.tight_layout(**LAYOUT)
fig.savefig(config.get_figure_file_path(os.sys.argv[0]))
plt.close(fig)
if __name__ == '__main__':
main()
|
py | 1a4909bf9d006d391f6884b20faa45b506cdf10b | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
from gbpservice.common import utils
from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.nfp.core import log as nfp_logging
import netaddr
from neutron._i18n import _LE
from neutron.api.v2 import attributes as attr
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.db import l3_db
from neutron.db.l3_db import DEVICE_OWNER_ROUTER_INTF
from neutron.db.l3_db import EXTERNAL_GW_INFO
from neutron.db.l3_db import RouterPort
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.plugins.common import constants as n_const
import neutron_fwaas.extensions
from neutron_fwaas.services.firewall import fwaas_plugin as ref_fw_plugin
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import orm
from neutron_fwaas.db.firewall import firewall_db as n_firewall
LOG = nfp_logging.getLogger(__name__)
class NFPFirewallPlugin(ref_fw_plugin.FirewallPlugin):
def __init__(self):
# Monkey patch L3 agent topic
# L3 agent was where reference firewall agent runs
# patch that topic to the NFP firewall agent's topic name
ref_fw_plugin.f_const.L3_AGENT = topics.FW_NFP_CONFIGAGENT_TOPIC
#n_topics.L3_AGENT = topics.FW_NFP_CONFIGAGENT_TOPIC
# Ensure neutron fwaas extensions are loaded
ext_path = neutron_fwaas.extensions.__path__[0]
if ext_path not in cfg.CONF.api_extensions_path.split(':'):
cfg.CONF.set_override(
'api_extensions_path',
cfg.CONF.api_extensions_path + ':' + ext_path)
super(NFPFirewallPlugin, self).__init__()
# Modifying following plugin function, to relax same router validation
def _get_routers_for_create_firewall(self, tenant_id, context, firewall):
# pop router_id as this goes in the router association db
# and not firewall db
router_ids = firewall['firewall'].pop('router_ids', None)
if router_ids == attr.ATTR_NOT_SPECIFIED:
return tenant_id
def set_routers_for_firewall(self, context, fw):
"""Sets the routers associated with the fw."""
pass
def get_firewall_routers(self, context, fwid):
"""Gets all routers associated with a firewall."""
fw_rtrs = ['1234567890']
return fw_rtrs
def validate_firewall_routers_not_in_use(
self, context, router_ids, fwid=None):
"""Validate if router-ids not associated with any firewall.
If any of the router-ids in the list is already associated with
a firewall, raise an exception else just return.
"""
pass
def update_firewall_routers(self, context, fw):
"""Update the firewall with new routers.
This involves removing existing router associations and replacing
it with the new router associations provided in the update method.
"""
return fw
# Monkey patching the create_firewall db method
def create_firewall(self, context, firewall, status=None):
fw = firewall['firewall']
tenant_id = fw['tenant_id']
        # distributed routers may require a more complex state machine;
# the introduction of a new 'CREATED' state allows this, whilst
# keeping a backward compatible behavior of the logical resource.
if not status:
status = n_const.PENDING_CREATE
with context.session.begin(subtransactions=True):
self._validate_fw_parameters(context, fw, tenant_id)
firewall_db = n_firewall.Firewall(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=fw['name'],
description=fw['description'],
firewall_policy_id=fw['firewall_policy_id'],
admin_state_up=fw['admin_state_up'],
status=status)
context.session.add(firewall_db)
return self._make_firewall_dict(firewall_db)
n_firewall.Firewall_db_mixin.create_firewall = create_firewall
# Monkey patching l3_db's _get_router_for_floatingip method to associate
# floatingip if corresponding routes is present.
def _is_net_reachable_from_net(self, context, tenant_id, from_net_id,
to_net_id):
"""Check whether a network is reachable.
Follow the paths of networks connected by devices, to determine
whether a network is reachable from another.
@param context: neutron api request context
@param tenant_id: the owning tenant
@param from_net_id: the source network for the search
@param to_net_id: the destination network for the search
@return: True or False whether a path exists
"""
original_context = context
context = elevate_context(context)
tenant_id = context.tenant_id
def nexthop_nets_query(nets, visited):
"""query networks connected to devices on nets but not visited."""
Port = models_v2.Port
devices_on_nets = context.session.query(Port.device_id).filter(
Port.tenant_id == tenant_id,
Port.device_owner.notin_([l3_constants.DEVICE_OWNER_DHCP]),
Port.network_id.in_(nets)).subquery()
return context.session.query(Port.network_id).filter(
Port.tenant_id == tenant_id,
Port.network_id.notin_(visited),
Port.device_id.in_(devices_on_nets))
visited = set([])
nets = set([from_net_id])
while nets:
if to_net_id in nets:
context = original_context
return True
visited |= nets
nets = set((tup[0] for tup in nexthop_nets_query(nets, visited)))
context = original_context
return False
def _find_net_for_nexthop(self, context, tenant_id, router_id, nexthop):
"""Find the network to which the nexthop belongs.
Iterate over the router interfaces to find the network of nexthop.
@param context: neutron api request context
@param tenant_id: the owning tenant
@param router_id: a router id
@param nexthop: an IP address
@return: the network id of the nexthop or None if not found
"""
interfaces = context.session.query(models_v2.Port).filter_by(
tenant_id=tenant_id,
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_INTF)
for interface in interfaces:
cidrs = [self._core_plugin._get_subnet(context,
ip['subnet_id'])['cidr']
for ip in interface['fixed_ips']]
if netaddr.all_matching_cidrs(nexthop, cidrs):
return interface['network_id']
def _find_routers_via_routes_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
"""Find routers with route to the internal IP address.
Iterate over the routers that belong to the same tenant as
'internal_port'. For each router check that the router is connected
to the external network and whether there is a route to the internal
IP address. Consider only routers for which there is a path from the
nexthop of the route to the internal port.
Sort the list of routers to have the router with the most specific
route first (largest CIDR prefix mask length).
@param context: neutron api request context
@param internal_port: the port dict for the association
@param internal_subnet_id: the subnet for the association
@param external_network_id: the network of the floatingip
@return: a sorted list of matching routers
"""
original_context = context
context = elevate_context(context)
internal_ip_address = [
ip['ip_address'] for ip in internal_port['fixed_ips']
if ip['subnet_id'] == internal_subnet_id
][0]
# find the tenant routers
tenant_id = internal_port['tenant_id']
routers = self.get_routers(context, filters={'tenant_id': [tenant_id]})
prefix_routers = []
for router in routers:
# verify that the router is on "external_network"
gw_info = router.get(EXTERNAL_GW_INFO)
if not gw_info or gw_info['network_id'] != external_network_id:
continue
# find a matching route
if 'routes' not in router:
continue
cidr_nexthops = {}
for route in router['routes']:
cidr = netaddr.IPNetwork(route['destination'])
if cidr not in cidr_nexthops:
cidr_nexthops[cidr] = []
cidr_nexthops[cidr].append(route['nexthop'])
smallest_cidr = netaddr.smallest_matching_cidr(
internal_ip_address,
cidr_nexthops.keys())
if not smallest_cidr:
continue
# validate that there exists a path to "internal_port"
for nexthop in cidr_nexthops[smallest_cidr]:
net_id = self._find_net_for_nexthop(context, context.tenant_id,
router['id'], nexthop)
if net_id and self._is_net_reachable_from_net(
context,
context.tenant_id,
net_id,
internal_port['network_id']):
prefix_routers.append(
(smallest_cidr.prefixlen, router['id']))
break
context = original_context
return [p_r[1] for p_r in sorted(prefix_routers, reverse=True)]
def elevate_context(context):
context = context.elevated()
context.tenant_id = _resource_owner_tenant_id()
return context
def _resource_owner_tenant_id():
user, pwd, tenant, auth_url = utils.get_keystone_creds()
keystoneclient = keyclient.Client(username=user, password=pwd,
auth_url=auth_url)
try:
tenant = keystoneclient.tenants.find(name=tenant)
return tenant.id
except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('No tenant with name %s exists.'), tenant)
except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
def _get_router_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
if not subnet['gateway_ip']:
msg = (_('Cannot add floating IP to port on subnet %s '
'which has no gateway_ip') % internal_subnet_id)
raise n_exc.BadRequest(resource='floatingip', msg=msg)
# Find routers(with router_id and interface address) that
# connect given internal subnet and the external network.
# Among them, if the router's interface address matches
# with subnet's gateway-ip, return that router.
# Otherwise return the first router.
gw_port = orm.aliased(models_v2.Port, name="gw_port")
routerport_qry = context.session.query(
RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
models_v2.Port, models_v2.IPAllocation).filter(
models_v2.Port.network_id == internal_port['network_id'],
RouterPort.port_type.in_(l3_constants.ROUTER_INTERFACE_OWNERS),
models_v2.IPAllocation.subnet_id == internal_subnet_id
).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
gw_port.network_id == external_network_id).distinct()
first_router_id = None
for router_id, interface_ip in routerport_qry:
if interface_ip == subnet['gateway_ip']:
return router_id
if not first_router_id:
first_router_id = router_id
if first_router_id:
return first_router_id
router_ids = self._find_routers_via_routes_for_floatingip(
context,
internal_port,
internal_subnet_id,
external_network_id)
if router_ids:
return router_ids[0]
raise l3.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet_id,
external_network_id=external_network_id,
port_id=internal_port['id'])
l3_db.L3_NAT_dbonly_mixin._get_router_for_floatingip = (
_get_router_for_floatingip)
l3_db.L3_NAT_dbonly_mixin._find_routers_via_routes_for_floatingip = (
_find_routers_via_routes_for_floatingip)
l3_db.L3_NAT_dbonly_mixin._find_net_for_nexthop = _find_net_for_nexthop
l3_db.L3_NAT_dbonly_mixin._is_net_reachable_from_net = (
_is_net_reachable_from_net)
|
py | 1a490c52fba792a66049811192a7d4588e0b1e5a | # Copyright (c) 2013 Pratik Kumar Sahu, Nagendra Chowdary, Anish Mathuria
# Ported to Python by Gallopsled
from __future__ import division
import os
import random
import struct
# +------------------------------------------------------------------------+
# | RANDOM NUMBERS FUNCTIONS |
# +------------------------------------------------------------------------+
# get a random integer i (0<=i<maxv)
# ==================================
def random_get_int(maxv):
return random.randrange(0, maxv)
def randel(arr):
return arr[random_get_int(len(arr))]
def enc_data_msn(c, i):
# c is the lsn to be encoded with a msn
# lsn = least significant nibble msn = most significant nibble
if c <= i:
if c == 0:
#Randomly select and return from {5,7}
return randel([5, 7])
else:
#Randomly select and return from {4,5,6,7}
return randel([4,5,6,7])
elif c == 0:
#Randomly select and return from {3,5,7}
return randel([3,5,7])
elif c <= 0x0A:
#Randomly select and return from {3,4,5,6,7}
#CSE Why doesn't the author use 3 below then?
return randel([4,5,6,7])
else:
return randel([4,6])
|
py | 1a490d226234f2aec3753c36eebff9b737782aa6 | """
Interfaces to various optimizers.
"""
from __future__ import print_function, division
import sys
from copy import copy
import warnings
# CRUFT: time.clock() removed from python 3.8
try:
from time import perf_counter
except ImportError:
from time import clock as perf_counter
import numpy as np
from . import monitor
from . import initpop
from . import lsqerror
from .history import History
from .formatnum import format_uncertainty
from .fitproblem import nllf_scale
from .dream import MCMCModel
class ConsoleMonitor(monitor.TimedUpdate):
"""
Display fit progress on the console
"""
def __init__(self, problem, progress=1, improvement=30):
monitor.TimedUpdate.__init__(self, progress=progress,
improvement=improvement)
self.problem = problem
def show_progress(self, history):
scale, err = nllf_scale(self.problem)
chisq = format_uncertainty(scale*history.value[0], err)
print("step", history.step[0], "cost", chisq)
sys.stdout.flush()
def show_improvement(self, history):
# print("step",history.step[0],"chisq",history.value[0])
p = self.problem.getp()
try:
self.problem.setp(history.point[0])
print(self.problem.summarize())
finally:
self.problem.setp(p)
sys.stdout.flush()
class CheckpointMonitor(monitor.TimedUpdate):
"""
Periodically save fit state so that it can be resumed later.
"""
#: Function to call at each checkpoint.
checkpoint = None # type: Callable[None, None]
def __init__(self, checkpoint, progress=60*30):
monitor.TimedUpdate.__init__(self, progress=progress,
improvement=np.inf)
self.checkpoint = checkpoint
self._first = True
def show_progress(self, history):
# Skip the first checkpoint since it only contains the
# start/resume state
if self._first:
self._first = False
else:
self.checkpoint(history)
def show_improvement(self, history):
pass
class StepMonitor(monitor.Monitor):
"""
Collect information at every step of the fit and save it to a file.
*fid* is the file to save the information to
*fields* is the list of "step|time|value|point" fields to save
The point field should be last in the list.
"""
FIELDS = ['step', 'time', 'value', 'point']
def __init__(self, problem, fid, fields=FIELDS):
if any(f not in self.FIELDS for f in fields):
raise ValueError("invalid monitor field")
self.fid = fid
self.fields = fields
self.problem = problem
self._pattern = "%%(%s)s\n" % (")s %(".join(fields))
fid.write("# " + ' '.join(fields) + '\n')
def config_history(self, history):
history.requires(time=1, value=1, point=1, step=1)
def __call__(self, history):
point = " ".join("%.15g" % v for v in history.point[0])
time = "%g" % history.time[0]
step = "%d" % history.step[0]
scale, _ = nllf_scale(self.problem)
value = "%.15g" % (scale * history.value[0])
out = self._pattern % dict(point=point, time=time,
value=value, step=step)
self.fid.write(out)
class MonitorRunner(object):
"""
Adaptor which allows solvers to accept progress monitors.
"""
def __init__(self, monitors, problem):
if monitors is None:
monitors = [ConsoleMonitor(problem)]
self.monitors = monitors
self.history = History(time=1, step=1, point=1, value=1,
population_points=1, population_values=1)
for M in self.monitors:
M.config_history(self.history)
self._start = perf_counter()
def __call__(self, step, point, value,
population_points=None, population_values=None):
self.history.update(time=perf_counter() - self._start,
step=step, point=point, value=value,
population_points=population_points,
population_values=population_values)
for M in self.monitors:
M(self.history)
class FitBase(object):
"""
FitBase defines the interface from bumps models to the various fitting
engines available within bumps.
Each engine is defined in its own class with a specific set of attributes
and methods.
The *name* attribute is the name of the optimizer. This is just a simple
string.
The *settings* attribute is a list of pairs (name, default), where the
names are defined as fields in FitOptions. A best attempt should be
made to map the fit options for the optimizer to the standard fit options,
since each of these becomes a new command line option when running
bumps. If that is not possible, then a new option should be added
to FitOptions. A plugin architecture might be appropriate here, if
there are reasons why specific problem domains might need custom fitters,
but this is not yet supported.
Each engine takes a fit problem in its constructor.
The :meth:`solve` method runs the fit. It accepts a
monitor to track updates, a mapper to distribute work and
key-value pairs defining the settings.
There are a number of optional methods for the fitting engines. Basically,
all the methods in :class:`FitDriver` first check if they are specialized
in the fit engine before performing a default action.
The *load*/*save* methods load and save the fitter state in a given
directory with a specific base file name. The fitter can choose a file
extension to add to the base name. Some care is needed to be sure that
the extension doesn't collide with other extensions such as .mon for
the fit monitor.
The *plot* method shows any plots to help understand the performance of
    the fitter, such as a convergence plot showing the range of values
    in the population over time, as well as plots of the parameter uncertainty
    if available. The plot should work within the figure canvas it is given.
The *stderr*/*cov* methods should provide summary statistics for the
parameter uncertainties. Some fitters, such as MCMC, will compute these
directly from the population. Others, such as BFGS, will produce an
estimate of the uncertainty as they go along. If the fitter does not
provide these estimates, then they will be computed from numerical
derivatives at the minimum in the FitDriver method.
"""
def __init__(self, problem):
"""Fit the models and show the results"""
self.problem = problem
def solve(self, monitors=None, mapper=None, **options):
raise NotImplementedError()
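# Editor's sketch (not part of bumps): a minimal FitBase subclass illustrating
# the contract described in the docstring above -- solve() returns the best
# point and its nllf, reporting progress through MonitorRunner. It assumes only
# the problem API already used elsewhere in this module (getp, bounds, nllf)
# and that the parameter bounds are finite.
class _RandomSearchExample(FitBase):
    name = "Random Search (example)"
    id = "rand-example"
    settings = [('steps', 100)]

    def solve(self, monitors=None, mapper=None, **options):
        options = _fill_defaults(options, self.settings)
        update = MonitorRunner(problem=self.problem, monitors=monitors)
        low, high = self.problem.bounds()
        x_best = self.problem.getp()
        f_best = self.problem.nllf(x_best)
        for step in range(options['steps']):
            # draw a candidate uniformly within the (finite) bounds
            x = np.random.uniform(low, high)
            fx = self.problem.nllf(x)
            if fx < f_best:
                x_best, f_best = x, fx
            update(step=step, point=x_best, value=f_best,
                   population_points=[x], population_values=[fx])
        return x_best, f_best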
class MultiStart(FitBase):
"""
Multi-start monte carlo fitter.
This fitter wraps a local optimizer, restarting it a number of times
to give it a chance to find a different local minimum. If the keep_best
option is True, then restart near the best fit, otherwise restart at
random.
"""
name = "Multistart Monte Carlo"
settings = [('starts', 100)]
def __init__(self, fitter):
FitBase.__init__(self, fitter.problem)
self.fitter = fitter
def solve(self, monitors=None, mapper=None, **options):
# TODO: need better way of tracking progress
import logging
starts = options.pop('starts', 1)
reset = not options.pop('keep_best', True)
f_best = np.inf
x_best = self.problem.getp()
for _ in range(max(starts, 1)):
logging.info("multistart round %d", _)
x, fx = self.fitter.solve(monitors=monitors, mapper=mapper,
**options)
if fx < f_best:
x_best, f_best = x, fx
logging.info("multistart f(x),x: %s %s", str(fx), str(x_best))
if reset:
self.problem.randomize()
else:
# Jitter
self.problem.setp(x_best)
pop = initpop.eps_init(1, self.problem.getp(),
self.problem.bounds(),
use_point=False, eps=1e-3)
self.problem.setp(pop[0])
return x_best, f_best
class DEFit(FitBase):
"""
Classic Storn and Price differential evolution optimizer.
"""
name = "Differential Evolution"
id = "de"
settings = [('steps', 1000), ('pop', 10), ('CR', 0.9), ('F', 2.0),
('ftol', 1e-8), ('xtol', 1e-6), #('stop', ''),
]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
from .mystic.optimizer import de
from .mystic.solver import Minimizer
from .mystic import stop
if monitors is None:
monitors = [ConsoleMonitor(self.problem)]
if mapper is not None:
_mapper = lambda p, v: mapper(v)
else:
_mapper = lambda p, v: list(map(self.problem.nllf, v))
resume = hasattr(self, 'state')
steps = options['steps'] + (self.state['step'][-1] if resume else 0)
strategy = de.DifferentialEvolution(npop=options['pop'],
CR=options['CR'],
F=options['F'],
crossover=de.c_bin,
mutate=de.rand1u)
success = parse_tolerance(options)
failure = stop.Steps(steps)
self.history = History()
# Step adds to current step number if resume
minimize = Minimizer(strategy=strategy, problem=self.problem,
history=self.history, monitors=monitors,
success=success, failure=failure)
if resume:
self.history.restore(self.state)
x = minimize(mapper=_mapper, abort_test=abort_test, resume=resume)
#print(minimize.termination_condition())
#with open("/tmp/evals","a") as fid:
# print >>fid,minimize.history.value[0],minimize.history.step[0],\
# minimize.history.step[0]*options['pop']*len(self.problem.getp())
return x, self.history.value[0]
def load(self, input_path):
self.state = load_history(input_path)
def save(self, output_path):
save_history(output_path, self.history.snapshot())
def parse_tolerance(options):
from .mystic import stop
if options.get('stop', ''):
return stop.parse_condition(options['stop'])
xtol, ftol = options['xtol'], options['ftol']
if xtol == 0:
if ftol == 0:
return None
if ftol < 0:
return stop.Rf(-ftol, scaled=True)
return stop.Rf(ftol, scaled=False)
else:
if xtol == 0:
return None
if xtol < 0:
return stop.Rx(-xtol, scaled=True)
return stop.Rx(xtol, scaled=False)
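# Editor's note on the convention implemented above: a 'stop' expression takes
# precedence; otherwise the x-based test is used whenever xtol is nonzero and
# the f-based test otherwise, a value of 0 disables the test entirely, a
# negative value is treated as a relative (scaled) tolerance, and a positive
# value as an absolute one.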
def _history_file(path):
return path + "-history.json"
def load_history(path):
"""
Load fitter details from a history file.
"""
import json
with open(_history_file(path), "r") as fid:
return json.load(fid)
def save_history(path, state):
"""
Save fitter details to a history file as JSON.
The content of the details are fitter specific.
"""
import json
with open(_history_file(path), "w") as fid:
json.dump(state, fid)
class BFGSFit(FitBase):
"""
BFGS quasi-newton optimizer.
BFGS estimates Hessian and its Cholesky decomposition, but initial
tests give uncertainties quite different from the directly computed
Jacobian in Levenburg-Marquardt or the Hessian estimated at the
minimum by numdifftools.
To use the internal 'H' and 'L' and save some computation time, then
use::
C = lsqerror.chol_cov(fit.result['L'])
stderr = lsqerror.stderr(C)
"""
name = "Quasi-Newton BFGS"
id = "newton"
settings = [('steps', 3000), ('starts', 1),
('ftol', 1e-6), ('xtol', 1e-12)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
from .quasinewton import quasinewton
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
result = quasinewton(fn=self.problem.nllf,
x0=self.problem.getp(),
monitor=self._monitor,
abort_test=abort_test,
itnlimit=options['steps'],
gradtol=options['ftol'],
steptol=1e-12,
macheps=1e-8,
eta=1e-8,
)
self.result = result
#code = result['status']
#from .quasinewton import STATUS
#print("%d: %s, x=%s, fx=%s"
# % (code, STATUS[code], result['x'], result['fx']))
return result['x'], result['fx']
def _monitor(self, step, x, fx):
self._update(step=step, point=x, value=fx,
population_points=[x],
population_values=[fx])
return True
class PSFit(FitBase):
"""
Particle swarm optimizer.
"""
name = "Particle Swarm"
id = "ps"
settings = [('steps', 3000), ('pop', 1)]
def solve(self, monitors=None, mapper=None, **options):
options = _fill_defaults(options, self.settings)
if mapper is None:
mapper = lambda x: list(map(self.problem.nllf, x))
from .random_lines import particle_swarm
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
low, high = self.problem.bounds()
cfo = dict(parallel_cost=mapper,
n=len(low),
x0=self.problem.getp(),
x1=low,
x2=high,
f_opt=0,
monitor=self._monitor)
npop = int(cfo['n'] * options['pop'])
result = particle_swarm(cfo, npop, maxiter=options['steps'])
satisfied_sc, n_feval, f_best, x_best = result
return x_best, f_best
def _monitor(self, step, x, fx, k):
self._update(step=step, point=x[:, k], value=fx[k],
population_points=x.T, population_values=fx)
return True
class RLFit(FitBase):
"""
Random lines optimizer.
"""
name = "Random Lines"
id = "rl"
settings = [('steps', 3000), ('starts', 20), ('pop', 0.5), ('CR', 0.9)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
if mapper is None:
mapper = lambda x: list(map(self.problem.nllf, x))
from .random_lines import random_lines
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
low, high = self.problem.bounds()
cfo = dict(parallel_cost=mapper,
n=len(low),
x0=self.problem.getp(),
x1=low,
x2=high,
f_opt=0,
monitor=self._monitor)
npop = max(int(cfo['n'] * options['pop']), 3)
result = random_lines(cfo, npop, abort_test=abort_test,
maxiter=options['steps'], CR=options['CR'])
satisfied_sc, n_feval, f_best, x_best = result
return x_best, f_best
def _monitor(self, step, x, fx, k):
# print "rl best",k, x.shape,fx.shape
self._update(step=step, point=x[:, k], value=fx[k],
population_points=x.T, population_values=fx)
return True
class PTFit(FitBase):
"""
Parallel tempering optimizer.
"""
name = "Parallel Tempering"
id = "pt"
settings = [('steps', 400), ('nT', 24), ('CR', 0.9),
('burn', 100), ('Tmin', 0.1), ('Tmax', 10)]
def solve(self, monitors=None, mapper=None, **options):
options = _fill_defaults(options, self.settings)
# TODO: no mapper??
from .partemp import parallel_tempering
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
t = np.logspace(np.log10(options['Tmin']),
np.log10(options['Tmax']),
options['nT'])
history = parallel_tempering(nllf=self.problem.nllf,
p=self.problem.getp(),
bounds=self.problem.bounds(),
# logfile="partemp.dat",
T=t,
CR=options['CR'],
steps=options['steps'],
burn=options['burn'],
monitor=self._monitor)
return history.best_point, history.best
def _monitor(self, step, x, fx, P, E):
self._update(step=step, point=x, value=fx,
population_points=P, population_values=E)
return True
class SimplexFit(FitBase):
"""
Nelder-Mead simplex optimizer.
"""
name = "Nelder-Mead Simplex"
id = "amoeba"
settings = [('steps', 1000), ('starts', 1), ('radius', 0.15),
('xtol', 1e-6), ('ftol', 1e-8)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from .simplex import simplex
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
# TODO: no mapper??
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
#print("bounds", self.problem.bounds())
result = simplex(f=self.problem.nllf, x0=self.problem.getp(),
bounds=self.problem.bounds(),
abort_test=abort_test,
update_handler=self._monitor,
maxiter=options['steps'],
radius=options['radius'],
xtol=options['xtol'],
ftol=options['ftol'])
# Let simplex propose the starting point for the next amoeba
# fit in a multistart amoeba context. If the best is always
# used, the fit can get stuck in a local minimum.
self.problem.setp(result.next_start)
#print("amoeba %s %s"%(result.x,result.fx))
return result.x, result.fx
def _monitor(self, k, n, x, fx):
self._update(step=k, point=x[0], value=fx[0],
population_points=x, population_values=fx)
return True
class MPFit(FitBase):
"""
MPFit optimizer.
"""
name = "MPFit"
id = "mp"
settings = [('steps', 200), ('ftol', 1e-10), ('xtol', 1e-10)]
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from .mpfit import mpfit
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
self._low, self._high = self.problem.bounds()
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
self._abort = abort_test
x0 = self.problem.getp()
parinfo = []
for low, high in zip(*self.problem.bounds()):
parinfo.append({
#'value': None, # passed in by xall instead
#'fixed': False, # everything is varying
'limited': (np.isfinite(low), np.isfinite(high)),
'limits': (low, high),
#'parname': '', # could probably ask problem for this...
# From the code, default step size is sqrt(eps)*abs(value)
# or eps if value is 0. This seems okay. The other
                # alternative is to limit it by bounds.
#'step': 0, # compute step automatically
#'mpside': 0, # 1, -1 or 2 for right-, left- or 2-sided deriv
#'mpmaxstep': 0., # max step for this parameter
#'tied': '', # parameter expressions tying fit parameters
#'mpprint': 1, # print the parameter value when iterating
})
result = mpfit(
fcn=self._residuals,
xall=x0,
parinfo=parinfo,
autoderivative=True,
fastnorm=True,
#damp=0, # no damping when damp=0
# Stopping conditions
ftol=options['ftol'],
xtol=options['xtol'],
#gtol=1e-100, # exclude gtol test
maxiter=options['steps'],
# Progress monitor
iterfunct=self._monitor,
nprint=1, # call monitor each iteration
quiet=True, # leave it to monitor to print any info
# Returns values
nocovar=True, # use our own covar calculation for consistency
)
if result.status > 0:
x, fx = result.params, result.fnorm
else:
x, fx = None, None
return x, fx
def _monitor(self, fcn, p, k, fnorm,
functkw=None, parinfo=None,
quiet=0, dof=None, **extra):
self._update(k, p, fnorm)
def _residuals(self, p, fjac=None):
if self._abort():
return -1, None
self.problem.setp(p)
# treat prior probabilities on the parameters as additional
# measurements
residuals = np.hstack(
(self.problem.residuals().flat, self.problem.parameter_residuals()))
# Tally costs for broken constraints
extra_cost = self.problem.constraints_nllf()
# Spread the cost over the residuals. Since we are smoothly increasing
# residuals as we leave the boundary, this should push us back into the
# boundary (within tolerance) during the lm fit.
residuals += np.sign(residuals) * (extra_cost / len(residuals))
return 0, residuals
class LevenbergMarquardtFit(FitBase):
"""
Levenberg-Marquardt optimizer.
"""
name = "Levenberg-Marquardt"
id = "lm"
settings = [('steps', 200), ('ftol', 1.5e-8), ('xtol', 1.5e-8)]
# LM also has
    # gtol: orthogonality between Jacobian columns
# epsfcn: numerical derivative step size
# factor: initial radius
# diag: variable scale factors to bring them near 1
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from scipy import optimize
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
self._low, self._high = self.problem.bounds()
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
x0 = self.problem.getp()
maxfev = options['steps']*(len(x0)+1)
result = optimize.leastsq(self._bounded_residuals,
x0,
ftol=options['ftol'],
xtol=options['xtol'],
maxfev=maxfev,
epsfcn=1e-8,
full_output=True)
x, cov_x, info, mesg, success = result
if not 1 <= success <= 4:
# don't treat "reached maxfev" as a true failure
if "reached maxfev" in mesg:
# unless the x values are bad
if not np.all(np.isfinite(x)):
x = None
mesg = "Levenberg-Marquardt fit failed with bad values"
else:
x = None
self._cov = cov_x if x is not None else None
# compute one last time with x forced inside the boundary, and using
# problem.nllf as returned by other optimizers. We will ignore the
# covariance output and calculate it again ourselves. Not ideal if
# f is expensive, but it will be consistent with other optimizers.
if x is not None:
x += self._stray_delta(x)
self.problem.setp(x)
fx = self.problem.nllf()
else:
fx = None
return x, fx
def _bounded_residuals(self, p):
# Force the fit point into the valid region
stray = self._stray_delta(p)
stray_cost = np.sum(stray**2)
if stray_cost > 0:
stray_cost += 1e6
self.problem.setp(p + stray)
# treat prior probabilities on the parameters as additional
# measurements
residuals = np.hstack(
(self.problem.residuals().flat, self.problem.parameter_residuals()))
# Tally costs for straying outside the boundaries plus other costs
extra_cost = stray_cost + self.problem.constraints_nllf()
# Spread the cost over the residuals. Since we are smoothly increasing
# residuals as we leave the boundary, this should push us back into the
# boundary (within tolerance) during the lm fit.
residuals += np.sign(residuals) * (extra_cost / len(residuals))
return residuals
def _stray_delta(self, p):
"""calculate how far point is outside the boundary"""
return (np.where(p < self._low, self._low - p, 0)
+ np.where(p > self._high, self._high - p, 0))
def cov(self):
return self._cov
class SnobFit(FitBase):
name = "SNOBFIT"
id = "snobfit"
settings = [('steps', 200)]
def solve(self, monitors=None, mapper=None, **options):
options = _fill_defaults(options, self.settings)
# TODO: no mapper??
from snobfit.snobfit import snobfit
self._update = MonitorRunner(problem=self.problem,
monitors=monitors)
x, fx, _ = snobfit(self.problem, self.problem.getp(),
self.problem.bounds(),
fglob=0, callback=self._monitor)
return x, fx
def _monitor(self, k, x, fx, improved):
# TODO: snobfit does have a population...
self._update(step=k, point=x, value=fx,
population_points=[x], population_values=[fx])
class DreamModel(MCMCModel):
"""
DREAM wrapper for fit problems.
"""
def __init__(self, problem=None, mapper=None):
"""
        Create a sampler for the multidimensional likelihood function
        represented by the problem, using dream.
"""
# print "dream"
self.problem = problem
self.bounds = self.problem.bounds()
self.labels = self.problem.labels()
self.mapper = mapper if mapper else lambda p: list(map(self.nllf, p))
def log_density(self, x):
return -self.nllf(x)
def nllf(self, x):
"""Negative log likelihood of seeing models given *x*"""
# Note: usually we will be going through the provided mapper, and
# this function will never be called.
# print "eval",x; sys.stdout.flush()
return self.problem.nllf(x)
def map(self, pop):
# print "calling mapper",self.mapper
return -np.array(self.mapper(pop))
class DreamFit(FitBase):
name = "DREAM"
id = "dream"
settings = [('samples', int(1e4)), ('burn', 100), ('pop', 10),
('init', 'eps'), ('thin', 1), ('alpha', 0.01),
('outliers', 'none'), ('trim', False),
('steps', 0), # deprecated: use --samples instead
]
def __init__(self, problem):
FitBase.__init__(self, problem)
self.dream_model = DreamModel(problem)
self.state = None
def solve(self, monitors=None, abort_test=None, mapper=None, **options):
from .dream import Dream
if abort_test is None:
abort_test = lambda: False
options = _fill_defaults(options, self.settings)
if mapper:
self.dream_model.mapper = mapper
self._update = MonitorRunner(problem=self.dream_model.problem,
monitors=monitors)
population = initpop.generate(self.dream_model.problem, **options)
pop_size = population.shape[0]
draws, steps = int(options['samples']), options['steps']
if steps == 0:
steps = (draws + pop_size-1) // pop_size
# TODO: need a better way to announce number of steps
# maybe somehow print iteration # of # iters in the monitor?
print("# steps: %d, # draws: %d"%(steps, pop_size*steps))
population = population[None, :, :]
sampler = Dream(model=self.dream_model, population=population,
draws=pop_size * steps,
burn=pop_size * options['burn'],
thinning=options['thin'],
monitor=self._monitor, alpha=options['alpha'],
outlier_test=options['outliers'],
DE_noise=1e-6)
self.state = sampler.sample(state=self.state, abort_test=abort_test)
self._trimmed = self.state.trim_portion() if options['trim'] else 1.0
#print("trimming", options['trim'], self._trimmed)
self.state.mark_outliers(portion=self._trimmed)
self.state.keep_best()
self.state.title = self.dream_model.problem.name
# TODO: Temporary hack to apply a post-mcmc action to the state vector
# The problem is that if we manipulate the state vector before saving
# it then we will not be able to use the --resume feature. We can
# get around this by just not writing state for the derived variables,
# at which point we can remove this notice.
# TODO: Add derived/visible variable support to other optimizers
fn, labels = getattr(self.problem, 'derive_vars', (None, None))
if fn is not None:
self.state.derive_vars(fn, labels=labels)
visible_vars = getattr(self.problem, 'visible_vars', None)
if visible_vars is not None:
self.state.set_visible_vars(visible_vars)
integer_vars = getattr(self.problem, 'integer_vars', None)
if integer_vars is not None:
self.state.set_integer_vars(integer_vars)
x, fx = self.state.best()
# Check that the last point is the best point
#points, logp = self.state.sample()
#assert logp[-1] == fx
#print(points[-1], x)
#assert all(points[-1, i] == xi for i, xi in enumerate(x))
return x, -fx
def entropy(self, **kw):
return self.state.entropy(portion=self._trimmed, **kw)
def _monitor(self, state, pop, logp):
# Get an early copy of the state
self.state = self._update.history.uncertainty_state = state
step = state.generation
x, fx = state.best()
self._update(step=step, point=x, value=-fx,
population_points=pop, population_values=-logp)
return True
def stderr(self):
"""
        Approximate standard error as 1/2 the 68% interval of the sample,
which is a more robust measure than the mean of the sample for
non-normal distributions.
"""
from .dream.stats import var_stats
vstats = var_stats(self.state.draw(portion=self._trimmed))
return np.array([(v.p68[1] - v.p68[0]) / 2 for v in vstats], 'd')
#def cov(self):
# # Covariance estimate from final 1000 points
# return np.cov(self.state.draw().points[-1000:])
def load(self, input_path):
from .dream.state import load_state, path_contains_saved_state
if path_contains_saved_state(input_path):
print("loading saved state from %s (this might take awhile) ..."
% (input_path,))
fn, labels = getattr(self.problem, 'derive_vars', (None, []))
self.state = load_state(input_path, report=100, derived_vars=len(labels))
else:
# Warn if mc files are not found on --resume path
warnings.warn("No mcmc found; ignoring --resume=%r"%input_path)
def save(self, output_path):
self.state.save(output_path)
def plot(self, output_path):
self.state.show(figfile=output_path, portion=self._trimmed)
self.error_plot(figfile=output_path)
def show(self):
pass
def error_plot(self, figfile):
# Produce error plot
import pylab
from . import errplot
# TODO: shouldn't mix calc and display!
res = errplot.calc_errors_from_state(problem=self.dream_model.problem,
state=self.state,
portion=self._trimmed)
if res is not None:
pylab.figure()
errplot.show_errors(res)
pylab.savefig(figfile + "-errors.png", format='png')
class Resampler(FitBase):
# TODO: why isn't cli.resynth using this?
def __init__(self, fitter):
self.fitter = fitter
raise NotImplementedError()
def solve(self, **options):
starts = options.pop('starts', 1)
restart = options.pop('restart', False)
x, fx = self.fitter.solve(**options)
points = _resampler(self.fitter, x, samples=starts,
restart=restart, **options)
self.points = points # save points for later plotting
return x, fx
def _resampler(fitter, xinit, samples=100, restart=False, **options):
"""
Refit the result multiple times with resynthesized data, building
up an array in Result.samples which contains the best fit to the
resynthesized data. *samples* is the number of samples to generate.
    *fitter* is the (local) optimizer to use. **options are the parameters
for the optimizer.
"""
x = xinit
points = []
try: # TODO: some solvers already catch KeyboardInterrupt
for _ in range(samples):
# print "== resynth %d of %d" % (i, samples)
fitter.problem.resynth_data()
if restart:
fitter.problem.randomize()
else:
fitter.problem.setp(x)
x, fx = fitter.solve(**options)
points.append(np.hstack((fx, x)))
# print self.problem.summarize()
# print "[chisq=%g]" % (nllf*2/self.problem.dof)
except KeyboardInterrupt:
# On keyboard interrupt we can declare that we are finished sampling
# without it being an error condition, so let this exception pass.
pass
finally:
# Restore the state of the problem
fitter.problem.restore_data()
fitter.problem.setp(xinit)
#fitter.problem.model_update() # setp does model update
return points
class FitDriver(object):
def __init__(self, fitclass=None, problem=None, monitors=None,
abort_test=None, mapper=None, **options):
self.fitclass = fitclass
self.problem = problem
self.options = options
self.monitors = monitors
self.abort_test = abort_test
self.mapper = mapper if mapper else lambda p: list(map(problem.nllf, p))
self.fitter = None
self.result = None
def fit(self, resume=None):
if hasattr(self, '_cov'):
del self._cov
if hasattr(self, '_stderr'):
del self._stderr
fitter = self.fitclass(self.problem)
if resume:
fitter.load(resume)
starts = self.options.get('starts', 1)
if starts > 1:
fitter = MultiStart(fitter)
t0 = perf_counter()
self.fitter = fitter
x, fx = fitter.solve(monitors=self.monitors,
abort_test=self.abort_test,
mapper=self.mapper,
**self.options)
self.time = perf_counter() - t0
self.result = x, fx
if x is not None:
self.problem.setp(x)
return x, fx
def clip(self):
"""
Force parameters within bounds so constraints are finite.
The problem is updated with the new parameter values.
Returns a list of parameter names that were clipped.
"""
labels = self.problem.labels()
values = self.problem.getp()
bounds = self.problem.bounds()
new_values = np.clip(values, bounds[0], bounds[1])
clipped = [name for name, old, new in zip(labels, values, new_values)
if old != new]
self.problem.setp(new_values)
return clipped
def entropy(self, method=None):
if hasattr(self.fitter, 'entropy'):
return self.fitter.entropy(method=method)
else:
from .dream import entropy
return entropy.cov_entropy(self.cov()), 0
def chisq(self):
if not hasattr(self, '_chisq'):
self._chisq = self.problem.chisq()
return self._chisq
def cov(self):
r"""
Return an estimate of the covariance of the fit.
Depending on the fitter and the problem, this may be computed from
existing evaluations within the fitter, or from numerical
differentiation around the minimum.
If the problem uses $\chi^2/2$ as its nllf, then the covariance
is derived from the Jacobian::
x = fit.problem.getp()
J = bumps.lsqerror.jacobian(fit.problem, x)
cov = bumps.lsqerror.jacobian_cov(J)
Otherwise, the numerical differentiation will use the Hessian
estimated from nllf::
x = fit.problem.getp()
H = bumps.lsqerror.hessian(fit.problem, x)
cov = bumps.lsqerror.hessian_cov(H)
"""
# Note: if fit() has not been run then self.fitter is None and in
# particular, self.fitter will not have a covariance matrix. In
# this case, the code will fall through to computing the covariance
# matrix directly from the problem. It will use the initial value
# stored in the problem parameters because results will also be None.
if not hasattr(self, '_cov'):
self._cov = None
if hasattr(self.fitter, 'cov'):
self._cov = self.fitter.cov()
#print("fitter cov", self._cov)
if self._cov is None:
# Use Jacobian if residuals are available because it is faster
# to compute. Otherwise punt and use Hessian. The has_residuals
# attribute should be True if present. It may be false if
# the problem defines a residuals method but doesn't really
# have residuals (e.g. to allow levenberg-marquardt to run even
# though it is not fitting a sum-square problem).
if hasattr(self.problem, 'has_residuals'):
has_residuals = self.problem.has_residuals
else:
has_residuals = hasattr(self.problem, 'residuals')
x = self.problem.getp() if self.result is None else self.result[0]
if has_residuals:
J = lsqerror.jacobian(self.problem, x)
#print("Jacobian", J)
self._cov = lsqerror.jacobian_cov(J)
else:
H = lsqerror.hessian(self.problem, x)
#print("Hessian", H)
self._cov = lsqerror.hessian_cov(H)
return self._cov
def stderr(self):
"""
Return an estimate of the standard error of the fit.
Depending on the fitter and the problem, this may be computed from
existing evaluations within the fitter, or from numerical
differentiation around the minimum.
"""
# Note: if fit() has not been run then self.fitter is None and in
# particular, self.fitter will not have a stderr method defined so
# it will compute stderr from covariance.
if not hasattr(self, '_stderr'):
self._stderr = None
if hasattr(self.fitter, 'stderr'):
self._stderr = self.fitter.stderr()
if self._stderr is None:
# If no stderr from the fitter then compute it from the covariance
self._stderr = self.stderr_from_cov()
return self._stderr
def stderr_from_cov(self):
"""
Return an estimate of standard error of the fit from covariance matrix.
Unlike stderr, which uses the estimate from the underlying
fitter (DREAM uses the MCMC sample for this), *stderr_from_cov*
estimates the error from the diagonal of the covariance matrix.
Here, the covariance matrix may have been estimated by the fitter
instead of the Hessian.
"""
if not hasattr(self, '_stderr_from_cov'):
self._stderr_from_cov = lsqerror.stderr(self.cov())
return self._stderr_from_cov
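    # Illustrative note (assumption about bumps.lsqerror, not from this file):
    # lsqerror.stderr(C) used in stderr_from_cov above is expected to behave
    # like the usual square root of the covariance diagonal, i.e. roughly
    # equivalent to np.sqrt(np.diag(C)).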
def show(self):
if hasattr(self.fitter, 'show'):
self.fitter.show()
if hasattr(self.problem, 'show'):
self.problem.show()
def show_err(self):
"""
Display the error approximation from the numerical derivative.
Warning: cost grows as the cube of the number of parameters.
"""
# TODO: need cheaper uncertainty estimate
# Note: error estimated from hessian diagonal is insufficient.
err = self.stderr_from_cov()
norm = np.sqrt(self.chisq())
print("=== Uncertainty from curvature: name"
" value(unc.) "
" value(unc./chi)) ===")
for k, v, dv in zip(self.problem.labels(), self.problem.getp(), err):
print("%40s %-15s %-15s" % (k,
format_uncertainty(v, dv),
format_uncertainty(v, dv/norm)))
print("="*75)
def show_cov(self):
cov = self.cov()
maxn = 1000 # max array dims to print
cov_str = np.array2string(
cov,
max_line_width=20*maxn, threshold=maxn*maxn,
precision=6, #suppress_small=True,
separator=', ',
)
print("=== Covariance matrix ===")
print(cov_str)
print("=========================")
def show_entropy(self, method=None):
print("Calculating entropy...")
S, dS = self.entropy(method=method)
print("Entropy: %s bits" % format_uncertainty(S, dS))
def save(self, output_path):
# print "calling driver save"
if hasattr(self.fitter, 'save'):
self.fitter.save(output_path)
if hasattr(self.problem, 'save'):
self.problem.save(output_path)
def load(self, input_path):
# print "calling driver save"
if hasattr(self.fitter, 'load'):
self.fitter.load(input_path)
if hasattr(self.problem, 'load'):
self.problem.load(input_path)
def plot(self, output_path, view=None):
# print "calling fitter.plot"
if hasattr(self.problem, 'plot'):
self.problem.plot(figfile=output_path, view=view)
if hasattr(self.fitter, 'plot'):
self.fitter.plot(output_path=output_path)
def _save_fit_cov(self, output_path):
model = getattr(self.problem, 'name', self.problem.__class__.__name__)
fitter = self.fitclass.id
cov = self.cov()
err = self.stderr_from_cov()
chisq = self.chisq()
state = {
'model': model,
'fitter': fitter,
}
def _fill_defaults(options, settings):
"""
Returns options dict with missing values filled from settings.
"""
result = dict(settings) # settings is a list of (key,value) pairs
result.update(options)
return result
FITTERS = []
FIT_AVAILABLE_IDS = []
FIT_ACTIVE_IDS = []
def register(fitter, active=True):
"""
Register a new fitter with bumps, if it is not already there.
*active* is False if you don't want it showing up in the GUI selector.
"""
# Check if already registered.
if fitter in FITTERS:
return
# Check that there is no other fitter of that name
if fitter.id in FIT_AVAILABLE_IDS:
raise ValueError("There is already a fitter registered as %r"
% fitter.id)
# Register the fitter.
FITTERS.append(fitter)
FIT_AVAILABLE_IDS.append(fitter.id)
# Make it "active" by listing it in the help menu.
if active:
FIT_ACTIVE_IDS.append(fitter.id)
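# Illustrative sketch (not part of the original module): a hypothetical
# user-defined fitter could be registered the same way. The class below is an
# assumption for demonstration only and is left commented out so that module
# behavior is unchanged.
#
#     class RandomSearchFit(FitBase):
#         name = "Random Search"
#         id = "random"
#         settings = [('steps', 1000)]
#         def solve(self, monitors=None, abort_test=None, mapper=None, **options):
#             options = _fill_defaults(options, self.settings)
#             low, high = self.problem.bounds()
#             best_x, best_fx = self.problem.getp(), self.problem.nllf()
#             for _ in range(options['steps']):
#                 x = np.random.uniform(low, high)
#                 fx = self.problem.nllf(x)
#                 if fx < best_fx:
#                     best_x, best_fx = x, fx
#             return best_x, best_fx
#
#     register(RandomSearchFit, active=False)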
# Register the fitters
register(SimplexFit, active=True)
register(DEFit, active=True)
register(DreamFit, active=True)
register(BFGSFit, active=True)
register(LevenbergMarquardtFit, active=True)
register(MPFit, active=True)
#register(PSFit, active=False)
register(PTFit, active=False)
#register(RLFit, active=False)
#register(SnobFit, active=False)
FIT_DEFAULT_ID = SimplexFit.id
assert FIT_DEFAULT_ID in FIT_ACTIVE_IDS
assert all(f in FIT_AVAILABLE_IDS for f in FIT_ACTIVE_IDS)
def fit(problem, method=FIT_DEFAULT_ID, verbose=False, **options):
"""
Simplified fit interface.
Given a fit problem, the name of a fitter and the fitter options,
it will run the fit and return the best value and standard error of
the parameters. If *verbose* is true, then the console monitor will
be enabled, showing progress through the fit and showing the parameter
standard error at the end of the fit, otherwise it is completely
silent.
Returns an *OptimizeResult* object containing "x" and "dx". The
dream fitter also includes the "state" object, allowing for more
detailed uncertainty analysis. Optimizer information such as the
    stopping condition and the number of function evaluations is not
yet included.
To run in parallel (with multiprocessing and dream)::
from bumps.mapper import MPMapper
mapper = MPMapper.start_mapper(problem, None, cpu=0) #cpu=0 for all CPUs
result = fit(problem, method="dream", mapper=mapper)
"""
from scipy.optimize import OptimizeResult
#verbose = True
if method not in FIT_AVAILABLE_IDS:
raise ValueError("unknown method %r not one of %s"
% (method, ", ".join(sorted(FIT_ACTIVE_IDS))))
for fitclass in FITTERS:
if fitclass.id == method:
break
monitors = None if verbose else [] # default is step monitor
driver = FitDriver(
fitclass=fitclass, problem=problem, monitors=monitors,
**options)
driver.clip() # make sure fit starts within domain
x0 = problem.getp()
x, fx = driver.fit()
problem.setp(x)
dx = driver.stderr()
if verbose:
print("final chisq", problem.chisq_str())
driver.show_err()
result = OptimizeResult(
x=x, dx=driver.stderr(),
fun=fx,
success=True, status=0, message="successful termination",
#nit=0, # number of iterations
#nfev=0, # number of function evaluations
#njev, nhev # jacobian and hessian evaluations
#maxcv=0, # max constraint violation
)
if hasattr(driver.fitter, 'state'):
result.state = driver.fitter.state
return result |
py | 1a490d95d85c1802b30b5c9937635e39dc26a251 | #
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import re
import pytest
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype, is_float_dtype, is_integer_dtype, is_string_dtype
from PIL import Image, ImageChops
import wave
from pardata.dataset import Dataset
from pardata.loaders import Loader
from pardata.loaders import FormatLoaderMap
from pardata.loaders._format_loader_map import load_data_files
from pardata.loaders.audio import WaveLoader
from pardata.loaders.image import PillowLoader
from pardata.loaders.text import PlainTextLoader
from pardata.loaders.table import CSVPandasLoader
class TestBaseLoader:
"Test loaders._base.*"
def test_abstract(self):
"Loader is an abstract class."
with pytest.raises(TypeError) as e:
Loader()
assert 'abstract class' in str(e.value)
def test_load(self, tmp_path):
"Loader.load() must be overridden upon Loader being inherited."
class MyLoader(Loader):
pass
# Error out when instantiating MyLoader because load method is not overridden
with pytest.raises(TypeError) as e:
MyLoader()
assert "Can't instantiate abstract class MyLoader with abstract method" in str(e.value)
class MyLoader(Loader):
def load(self, path, options):
# Calling the parent's load() method shouldn't lead to error
super().load(path, options)
# This line shouldn't error out even though it calls an abstract method in its parent
MyLoader().load(tmp_path, None)
def test_check_path(self):
"Test Loader.check_path method."
class MyLoader(Loader):
def load(self):
pass
loader = MyLoader()
integer = 1
with pytest.raises(TypeError) as e:
loader.check_path(integer)
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
class TestFormatLoaderMap:
"""Test loaders._format_loader.*. Various path types (regex and plain) are tested in test_datasets.py, since it is
    easier and more natural to test there, and those tests already cover path types sufficiently.
"""
def test_register_non_loader(self):
"Test when it registers a non-Loader instance."
flm = FormatLoaderMap()
with pytest.raises(TypeError) as e:
flm.register_loader('some-format', 'some-string')
assert str(e.value) == 'loader "some-string" must be a Loader instance.'
def test_load_non_existing_format(self, tmp_path):
"Test loading a non-existing format."
with pytest.raises(RuntimeError) as e:
load_data_files('nonsense', tmp_path, tmp_path)
assert str(e.value) == 'The format loader map does not specify a loader for format "nonsense".'
def test_load_wrong_format_type(self, tmp_path):
"Test loading a non-existing format."
with pytest.raises(TypeError) as e:
load_data_files(0x348f, tmp_path, tmp_path)
assert str(e.value) == 'Parameter "fmt" must be a string or a dict, but it is of type "<class \'int\'>".'
def test_load_unknown_type_of_path(self, tmp_path):
"Test loading an unknown type of the parameter ``path``."
with pytest.raises(TypeError) as e:
load_data_files('audio/wav', tmp_path, 12)
assert str(e.value) == f'Unsupported type of the "path" parameter: {type(12)}.'
def test_load_unknown_path_type(self, tmp_path):
"Test loading an unknown ``path[type]``."
with pytest.raises(ValueError) as e:
load_data_files('image/png', tmp_path, {'type': 'nonsense'})
assert str(e.value) == 'Unknown type of path "nonsense".'
class TestAudioLoaders:
def test_wave_loader(self, bell_sound):
"Test the normal functionality of WaveLoader."
with wave.open(str(bell_sound), 'rb') as local:
local_content = local.readframes(local.getnframes())
with WaveLoader().load(bell_sound, {}) as loaded:
loaded_content = loaded.readframes(loaded.getnframes())
assert local_content == loaded_content
def test_wave_loader_no_path(self):
"Test WaveLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
WaveLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
class TestImageLoaders:
def test_image_pillow_loader(self, saturn_image):
"Test the normal functionality of PillowLoader."
local = Image.open(saturn_image)
loaded = PillowLoader().load(saturn_image, {})
assert ImageChops.difference(local, loaded).getbbox() is None
def test_image_pillow_loader_no_path(self):
"Test PillowLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
PillowLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
class TestTextLoaders:
def test_plain_text_loader_no_path(self):
"Test PlainTextLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
PlainTextLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
def test_plain_text_loader_bad_encoding(self, tmp_path):
"Test PlainTextLoader when the encoding is nonsense."
text_file = tmp_path / 'some-text.txt'
text_file.write_text("I'm a text file :)", encoding='utf-8')
with pytest.raises(LookupError) as e:
PlainTextLoader().load(text_file, {'encoding': "non-encoding"})
assert str(e.value) == 'unknown encoding: non-encoding'
def test_plain_text_loader_incorrect_encoding(self, tmp_path):
"Test PlainTextLoader when the encoding does not match."
text_file = tmp_path / 'some-text.txt'
text_file.write_text("I'm a text file :)", encoding='utf-8')
with pytest.raises(UnicodeError) as e:
PlainTextLoader().load(text_file, {'encoding': "utf-16"})
assert str(e.value) == 'UTF-16 stream does not start with BOM'
class TestTableLoaders:
def test_csv_pandas_loader(self, tmp_path, noaa_jfk_schema):
"Test the basic functioning of CSVPandasLoader."
dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
data = dataset.data['jfk_weather_cleaned']
assert isinstance(data, pd.DataFrame)
assert data.shape == (75119, 16)
dataset.delete()
Column = namedtuple('Column', ('name', 'dtype', 'check'))
@pytest.mark.parametrize('columns', # a list of Column (column name, specified data type, check function)
[
# Only one column specified
[Column('DATE', 'datetime', is_datetime64_any_dtype)],
[Column('DATE', 'str', is_string_dtype)],
[Column('DATE', 'string', is_string_dtype)],
[Column('HOURLYPressureTendencyCons', 'float', is_float_dtype)],
# Two columns specified
[Column('DATE', 'datetime', is_datetime64_any_dtype),
Column('HOURLYPressureTendencyCons', 'float', is_float_dtype)],
# No column specified (let Pandas autodetect dtype)
[Column('DATE', None, is_string_dtype),
Column('HOURLYPressureTendencyCons', None, is_integer_dtype),
Column('HOURLYVISIBILITY', None, is_float_dtype)],
])
def test_csv_pandas_column_data_types(self, tmp_path, noaa_jfk_schema, columns):
"Test the column data types."
assert len(columns) > 0 # Sanity check, make sure columns are there
# Clear columns
column_dict = noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['columns'] = {}
# Update column dictionary as specified
for col in columns:
if col.dtype is not None:
column_dict[col.name] = col.dtype
dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
data = dataset.data['jfk_weather_cleaned']
for col in columns:
assert col.check(data.dtypes[col.name])
@pytest.mark.parametrize(('err_column', # (column name, specified data type, default dtype checked for conversion)
'other_columns'), # (column name, specified data type, None)
[
# Only one unsupported specified
(Column('DATE', 'float', 'str'), []),
(Column('HOURLYVISIBILITY', 'int', 'float'), []),
# Some supported specified
(Column('DATE', 'float', 'str'), [Column('HOURLYPressureTendencyCons', 'int', None)]),
(Column('HOURLYVISIBILITY', 'int', 'float'), [Column('DATE', 'datetime', None)]),
# More than one unsupported specified. The error that raises the exception should be
# put as err_column.
(Column('DATE', 'float', 'str'), [Column('HOURLYVISIBILITY', 'int', 'float')]),
])
def test_csv_pandas_column_unsupported_data_types(self, tmp_path, noaa_jfk_schema,
err_column, other_columns):
"Test column data types when they are unsupported."
# Clear columns
column_dict = noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['columns'] = {}
# Update column dictionary as specified
for col in other_columns:
if col.dtype is not None:
column_dict[col.name] = col.dtype
column_dict[err_column.name] = err_column.dtype
with pytest.raises(ValueError) as e:
Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
# Pandas is a 3rd-party library. We don't check for the exact wording but only some keywords
# Examples:
# ValueError: cannot safely convert passed user dtype of int64 for float64 dtyped data in column 1
# ValueError: could not convert string to float: '2010-01-01 01:00:00'
assert 'convert' in str(e.value)
for t in (err_column.dtype, err_column.check):
assert re.search(rf"{t}(\d*|ing)\b", str(e.value)) # "ing" is for "str'ing'"
def test_csv_pandas_no_delimiter(self, tmp_path, noaa_jfk_schema):
"Test when no delimiter is given."
# Remove the delimiter option
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['delimiter']
data = Dataset(noaa_jfk_schema, tmp_path,
mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD).data['jfk_weather_cleaned']
assert len(data.columns) == 16 # number of columns remain the same
@pytest.mark.parametrize('delimiter', ('\t', ' ', '|', ';'))
def test_csv_pandas_delimiter(self, tmp_path, noaa_jfk_schema, delimiter):
"Test common delimiter settings. Note that the case of comma has been tested in ``test_csv_pandas_loader``."
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['columns']
# Change delimiter to tab, |, ;, space
noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['delimiter'] = delimiter
data = Dataset(noaa_jfk_schema, tmp_path,
mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD).data['jfk_weather_cleaned']
# None of these delimiters exist in the file, number of columns should be 1.
assert len(data.columns) == 1
def test_csv_pandas_loader_no_path(self):
"Test CSVPandasLoader when fed in with non-path."
integer = 1
with pytest.raises(TypeError) as e:
CSVPandasLoader().load(integer, {})
assert str(e.value) == f'Unsupported path type "{type(integer)}".'
def test_csv_pandas_loader_non_option(self, tmp_path, noaa_jfk_schema):
"Test CSVPandasLoader when None option is passed."
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']
dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_AND_LOAD)
data = dataset.data['jfk_weather_cleaned']
assert isinstance(data, pd.DataFrame)
assert len(data) == 75119
def test_csv_pandas_loader_no_encoding(self, tmp_path, noaa_jfk_schema):
"Test CSVPandasLoader when no encoding is specified."
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['encoding']
self.test_csv_pandas_loader(tmp_path, noaa_jfk_schema)
def test_csv_pandas_header(self, tmp_path, noaa_jfk_schema):
"Test CSVPandasLoader header options"
noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['no_header'] = True
noaa_dataset = Dataset(noaa_jfk_schema, tmp_path, mode=Dataset.InitializationMode.DOWNLOAD_ONLY)
with pytest.raises(ValueError) as exinfo: # Pandas should error from trying to read string as another dtype
noaa_dataset.load()
assert('could not convert string to float' in str(exinfo.value))
noaa_dataset.delete()
false_test_cases = [False, '', None] # These should all be treated as False
for case in false_test_cases:
noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['no_header'] = case
self.test_csv_pandas_loader(tmp_path, noaa_jfk_schema)
del noaa_jfk_schema['subdatasets']['jfk_weather_cleaned']['format']['options']['no_header']
self.test_csv_pandas_loader(tmp_path, noaa_jfk_schema)
|
py | 1a490e116c31ca9d7e1e0ab48b1327f5fd8a5bde | """Constant optimizer used for deep symbolic regression."""
from functools import partial
import numpy as np
from scipy.optimize import minimize
def make_const_optimizer(name, **kwargs):
"""Returns a ConstOptimizer given a name and keyword arguments"""
const_optimizers = {
None : Dummy,
"dummy" : Dummy,
"scipy" : ScipyMinimize,
}
return const_optimizers[name](**kwargs)
class ConstOptimizer(object):
"""Base class for constant optimizer"""
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, f, x0):
"""
Optimizes an objective function from an initial guess.
The objective function is the negative of the base reward (reward
without penalty) used for training. Optimization excludes any penalties
because they are constant w.r.t. to the constants being optimized.
Parameters
----------
f : function mapping np.ndarray to float
Objective function (negative base reward).
x0 : np.ndarray
Initial guess for constant placeholders.
Returns
-------
x : np.ndarray
Vector of optimized constants.
"""
raise NotImplementedError
class Dummy(ConstOptimizer):
"""Dummy class that selects the initial guess for each constant"""
def __init__(self, **kwargs):
super(Dummy, self).__init__(**kwargs)
def __call__(self, f, x0):
return x0
class ScipyMinimize(ConstOptimizer):
"""SciPy's non-linear optimizer"""
def __init__(self, **kwargs):
super(ScipyMinimize, self).__init__(**kwargs)
def __call__(self, f, x0):
with np.errstate(divide='ignore'):
opt_result = partial(minimize, **self.kwargs)(f, x0)
x = opt_result['x']
return x
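

# Minimal usage sketch (not part of the original module). The quadratic
# objective below is fabricated purely for illustration; any keyword options
# given to make_const_optimizer are forwarded to scipy.optimize.minimize.
if __name__ == "__main__":
    def _demo_objective(x):
        # Stand-in for the negative base reward: distance from target constants.
        return float(np.sum((x - np.array([1.5, -2.0])) ** 2))

    const_optimizer = make_const_optimizer("scipy", method="BFGS")
    optimized = const_optimizer(_demo_objective, np.array([0.0, 0.0]))
    print(optimized)  # expected to be close to [1.5, -2.0]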
|
py | 1a490e46affaf29689af7bf040f4c758988a424c | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module provides all database models at the current HEAD.
Its purpose is to create metadata comparable with the current database schema.
Based on this comparison, the database can be healed with a healing migration.
"""
from neutron.db import address_scope_db # noqa
from neutron.db import agents_db # noqa
from neutron.db import agentschedulers_db # noqa
from neutron.db import allowedaddresspairs_db # noqa
from neutron.db import dvr_mac_db # noqa
from neutron.db import external_net_db # noqa
from neutron.db import extradhcpopt_db # noqa
from neutron.db import extraroute_db # noqa
from neutron.db import flavors_db # noqa
from neutron.db import l3_agentschedulers_db # noqa
from neutron.db import l3_attrs_db # noqa
from neutron.db import l3_db # noqa
from neutron.db import l3_dvrscheduler_db # noqa
from neutron.db import l3_gwmode_db # noqa
from neutron.db import l3_hamode_db # noqa
from neutron.db.metering import metering_db # noqa
from neutron.db import model_base
from neutron.db import models_v2 # noqa
from neutron.db import portbindings_db # noqa
from neutron.db import portsecurity_db # noqa
from neutron.db.qos import models as qos_models # noqa
from neutron.db.quota import models # noqa
from neutron.db import rbac_db_models # noqa
from neutron.db import securitygroups_db # noqa
from neutron.db import servicetype_db # noqa
from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa
from neutron.plugins.bigswitch.db import consistency_db # noqa
from neutron.plugins.bigswitch import routerrule_db # noqa
from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers import type_flat # noqa
from neutron.plugins.ml2.drivers import type_geneve # noqa
from neutron.plugins.ml2.drivers import type_gre # noqa
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.plugins.ml2 import models # noqa
from neutron.plugins.nuage import nuage_models # noqa
def get_metadata():
return model_base.BASEV2.metadata
|
py | 1a490e8d6290c557368721d78b212fcb309e9bb4 | # -*- coding: utf-8 -*-
"""
flask_jsondash.db
~~~~~~~~~~~~~~~~~~~~~~~~~~
A translation adapter for transparent operations between storage types.
:copyright: (c) 2016 by Chris Tabor.
:license: MIT, see LICENSE for more details.
"""
import json
from datetime import datetime as dt
from pymongo import MongoClient
from flask_jsondash import mongo_adapter, settings
DB_NAME = settings.ACTIVE_DB
def reformat_data(data, c_id):
"""Format/clean existing config data to be re-inserted into database.
Args:
data (dict): The chart data to override with standard params.
Returns:
data (dict): The in-place updated dict.
"""
data.update(dict(id=c_id, date=dt.now()))
return data
def format_charts(data):
"""Form chart POST data for JSON usage within db.
Args:
data (dict): The request.form data to format.
Returns:
modules (list): A list of json-decoded dictionaries.
"""
modules = []
for item in data:
if item.startswith('module_'):
val_json = json.loads(data[item])
modules.append(val_json)
return modules
def get_db_handler():
"""Get the appropriate db adapter.
Returns:
object: The instantiated database handler
"""
if DB_NAME == 'mongo':
client = MongoClient(host=settings.DB_URI, port=settings.DB_PORT)
conn = client[settings.DB_NAME]
coll = conn[settings.DB_TABLE]
return mongo_adapter.Db(client, conn, coll, format_charts)
else:
raise NotImplementedError(
'Mongodb is the only supported database right now.')
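

# Minimal usage sketch (not part of the original module): how format_charts()
# parses request.form-style data. The payload below is fabricated for
# illustration only.
if __name__ == '__main__':
    fake_form = {
        'module_1': '{"name": "chart-1", "family": "C3", "type": "line"}',
        'module_2': '{"name": "chart-2", "family": "C3", "type": "bar"}',
        'csrf_token': 'ignored',  # keys without the module_ prefix are skipped
    }
    print(format_charts(fake_form))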
|
py | 1a490e9f4e95b7ec5f2c26fdf5161722e000a9a1 | from django.urls import path
from .views import BookmarkDetail, BookmarkDelete, BookmarkCreate, BookmarkUpdate, BookmarkList
# namespace = name space
# Used to avoid clashes between url pattern names of different apps
# Before version 2.x a separate namespace argument existed
app_name = 'bookmark'
urlpatterns = [
    # Function-based view: use just the name
    # Class-based view: Name.as_view()
path('', BookmarkList.as_view(), name='index'),
path('create/', BookmarkCreate.as_view(), name='create'),
path('delete/<int:pk>/', BookmarkDelete.as_view(), name='delete'),
path('update/<int:pk>/', BookmarkUpdate.as_view(), name='update'),
path('detail/<int:pk>/', BookmarkDetail.as_view(), name='detail'),
]
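# Usage sketch (assumption): with app_name set above, these routes are reversed
# with the "bookmark:" prefix, e.g. reverse('bookmark:detail', args=[pk]) in
# Python code or {% url 'bookmark:update' bookmark.pk %} in a template.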
|
py | 1a490eeff41c11bb45b3d9c7ae7aa4ef9b58eb2d | import _sk_fail; _sk_fail._("warnings")
|
py | 1a491004b8b92d2cf0ad47498b6630d2a322fe53 | from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, replace
from typing import Any, Dict, Type, TypeVar
_T = TypeVar("_T", covariant=True)
class Id(ABC):
"""Id
"""
@abstractmethod
def __eq__(self, other: Id) -> bool:
raise NotImplementedError
@abstractmethod
def __ne__(self, other: Id) -> bool:
raise NotImplementedError
@abstractmethod
def __lt__(self, other: Id) -> bool:
raise NotImplementedError
@abstractmethod
def __le__(self, other: Id) -> bool:
raise NotImplementedError
@abstractmethod
def __gt__(self, other: Id) -> bool:
raise NotImplementedError
@abstractmethod
def __ge__(self, other: Id) -> bool:
raise NotImplementedError
@classmethod
@abstractmethod
def generate(cls) -> Id:
raise NotImplementedError
@dataclass(eq=False, frozen=True)
class Entity:
id_: Id
def __eq__(self, other: Entity) -> bool:
return self.id_ == other.id_
def __ne__(self, other: Entity) -> bool:
return self.id_ != other.id_
def __lt__(self, other: Entity) -> bool:
return self.id_ < other.id_
def __le__(self, other: Entity) -> bool:
return self.id_ <= other.id_
def __gt__(self, other: Entity) -> bool:
return self.id_ > other.id_
def __ge__(self, other: Entity) -> bool:
return self.id_ >= other.id_
def _update(self, **changes: Any):
return replace(self, **changes)
def as_role(self, role: Type[_T]) -> _T:
if issubclass(self.__class__, role):
raise TypeError(
f"{role.__name__} is not a {self.__class__.__name__} role object."
)
return role(**self.as_dict())
def as_dict(self) -> Dict[str, Any]:
return asdict(self)
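

# Illustrative sketch (not part of the original module): one possible concrete
# Id based on uuid4 plus a tiny Entity subclass; the names are hypothetical.
if __name__ == "__main__":
    import uuid

    @dataclass(eq=False, frozen=True)
    class UuidId(Id):
        value: str

        def __eq__(self, other: Id) -> bool:
            return self.value == other.value

        def __ne__(self, other: Id) -> bool:
            return self.value != other.value

        def __lt__(self, other: Id) -> bool:
            return self.value < other.value

        def __le__(self, other: Id) -> bool:
            return self.value <= other.value

        def __gt__(self, other: Id) -> bool:
            return self.value > other.value

        def __ge__(self, other: Id) -> bool:
            return self.value >= other.value

        @classmethod
        def generate(cls) -> "UuidId":
            return cls(value=str(uuid.uuid4()))

    @dataclass(eq=False, frozen=True)
    class User(Entity):
        name: str

    user = User(id_=UuidId.generate(), name="alice")
    renamed = user._update(name="bob")
    print(user == renamed)    # True: Entity equality is based on id_ only.
    print(renamed.as_dict())  # {'id_': {'value': '...'}, 'name': 'bob'}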
|
py | 1a491041db2139156839a1b13190da20fb09f341 | # Colorama module: pip install colorama
from colorama import init, Fore, Style
# Selenium module imports: pip install selenium
from selenium import webdriver
from selenium.common.exceptions import TimeoutException as TE
from selenium.common.exceptions import ElementClickInterceptedException as ECIE
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as WDW
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
# Python default import.
from time import sleep
from glob import glob
import os
"""Colorama module constants."""
init(convert=True) # Init colorama module.
red = Fore.RED # Red color.
green = Fore.GREEN # Green color.
yellow = Fore.YELLOW # Yellow color.
reset = Style.RESET_ALL # Reset color attribute.
class Settings(object):
"""Contains all settings of upload."""
def __init__(self, file: str, filetype: str) -> None:
"""Open Settings JSON file and read it."""
self.filetype = filetype[1:] # Type of data file.
if self.filetype == 'json':
from json import loads
self.file = loads(open(file, encoding='utf-8').read())['nft']
            self.len_file = len(self.file) # Length of file.
elif self.filetype == 'csv':
self.file = open(file, encoding='utf-8').read().splitlines()[1:]
            self.len_file = len(self.file) # Length of file.
elif self.filetype == 'xlsx':
from pandas import read_excel
self.file = read_excel(file) # Read Excel (XLSX) file.
self.len_file = self.file.shape[0] # Get number of rows.
self.file = self.file.to_dict() # Transform XLSX to dict.
else:
import sys
sys.exit(f'{red}File extension is not support.{reset}')
def create_parameters(self, parameters: list) -> None:
"""Create parameters."""
# Upload:
self.file_path = str(parameters[0])
self.nft_name = str(parameters[1])
self.external_link = parameters[2]
self.description = str(parameters[3])
self.collection = str(parameters[4])
self.properties: list = self.type_parameters(
parameters[5], 2) # [[type, name], ...]
self.levels: list = self.type_parameters(
parameters[6], 3) # [[name, from, to], ...]
self.stats: list = self.type_parameters(
parameters[7], 3) # [[name, from, to], ...]
self.unlockable_content: list = parameters[8] # [bool, text]
self.explicit_and_sensitive_content: bool = parameters[9]
self.supply: int = parameters[10]
self.blockchain: str = parameters[11]
# Sell:
self.price: int = parameters[12]
self.quantity: int = parameters[13]
def type_parameters(self, parameters: list, _range: int) -> list:
"""Change element's type of some parameters."""
if len(parameters) > 0:
if type(parameters[0]) == list:
for parameter in range(len(parameters)):
for element in range(_range):
parameters[parameter][element] = \
str(parameters[parameter][element])
else:
for element in range(_range):
parameters[element] = str(parameters[element])
return parameters
def get_nft(self, nft: int) -> None:
"""Get all settings of NFT."""
self.nft = nft
if self.filetype == 'json':
self.json_file()
def type_checker(self, nft_settings: list) -> list:
"""Type with correctly string element in list."""
from ast import literal_eval
_list = []
nft_settings = nft_settings.split(';') \
if self.filetype == 'csv' else nft_settings
for element in nft_settings:
element = str(element).strip() # Remove whitespaces.
# Check if element is a list like.
if element != '':
if element[0] == '[' and element[len(element) - 1] == ']':
element = literal_eval(element)
# Check if element is a boolean like.
elif element == 'True' or element == 'False':
                    element = (element == 'True')  # bool('False') would be True.
                # Check if element is an integer like.
elif element.isdigit():
element = int(element)
elif element.replace('.', '').isdigit():
element = float(element)
_list.append(element)
return _list
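    # Illustrative sketch (not executed): for a fabricated CSV fragment such as
    #     "NFT #1; 10; 2.5; [['Type', 'Rare']]"
    # type_checker() returns ['NFT #1', 10, 2.5, [['Type', 'Rare']]].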
def json_file(self) -> None:
"""Transform JSON list/dict to a whole list."""
nft_settings = self.file[self.nft]
# Get key's value from the NFT data.
nft_settings = [nft_settings[settings] for settings in nft_settings]
_list = [] # Init a new list.
for element in nft_settings: # Take each element in list.
_list.append(self.dict_checker(element)) # Check element.
# Create parameters from list.
self.create_parameters(_list)
def dict_checker(self, element):
"""Check if element is a dict or not."""
if type(element) == list: # If element is a list.
final_list = [] # Final list that will be return.
for item in element: # For each item in this list.
temp_list = [] # Store all key's value.
if type(item) == dict: # If element is a dict.
for key in item: # For each key in dict (item).
temp_list.append(item.get(key)) # Get key's value.
else:
temp_list = item # Do nothing.
final_list.append(temp_list) # Append each temp list.
return final_list
else:
return element # Else return the same element.
class Opensea(object):
"""Main class of Opensea automatic uploader."""
def __init__(self, password: str, recovery_phrase: str) -> None:
"""Get the password and the recovery_phrase from the text file."""
# Get recovery phrase of MetaMask wallet.
self.recovery_phrase = recovery_phrase
self.password = password # Get new password.
# Used files path.
self.webdriver_path = 'assets/chromedriver.exe'
self.metamask_extension_path = 'assets/MetaMask.crx'
self.driver = self.webdriver() # Start new webdriver.
# Opensea URLs.
self.login_url = 'https://opensea.io/login?referrer=%2Fasset%2Fcreate'
self.create_url = 'https://opensea.io/asset/create'
def webdriver(self):
"""Start webdriver and return state of it."""
options = webdriver.ChromeOptions() # Configure options for Chrome.
options.add_extension(self.metamask_extension_path) # Add extension.
# options.add_argument("headless") # Headless ChromeDriver.
options.add_argument("log-level=3") # No logs is printed.
options.add_argument("--mute-audio") # Audio is muted.
driver = webdriver.Chrome(self.webdriver_path, options=options)
driver.maximize_window() # Maximize window to reach all elements.
return driver
def element_clickable(self, element: str) -> None:
"""Click on element if it's clickable using Selenium."""
try:
WDW(self.driver, 5).until(EC.element_to_be_clickable(
(By.XPATH, element))).click()
except ECIE:
# Sometimes the element is not clickable.
self.driver.execute_script(
"arguments[0].click();",
self.driver.find_element_by_xpath(element))
def element_visible(self, element: str, timer: int = 5):
"""Check if element is visible using Selenium."""
return WDW(self.driver, timer).until(EC.visibility_of_element_located(
(By.XPATH, element)))
def element_send_keys(self, element: str, keys: str) -> None:
"""Send keys to element if it's visible using Selenium."""
try:
WDW(self.driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, element))).send_keys(keys)
except TE:
# Some elements are not visible but are still present.
WDW(self.driver, 5).until(EC.presence_of_element_located(
(By.XPATH, element))).send_keys(keys)
def clear_text(self, element) -> None:
"""Clear text from input."""
self.element_clickable(element)
webdriver.ActionChains(self.driver).key_down(Keys.CONTROL).perform()
webdriver.ActionChains(self.driver).send_keys('a').perform()
webdriver.ActionChains(self.driver).key_up(Keys.CONTROL).perform()
def window_handles(self, window_number: int) -> None:
"""Check for window handles and wait until a specific tab is opened."""
wait = 0
while True:
# If asked tab is opened.
sleep(2)
if len(self.driver.window_handles) == window_number + 1:
return True
elif wait == 10:
return False
wait += 1
def metamask(self) -> None:
"""Login to MetaMask extension."""
print('Login to MetaMask extension.', end=' ')
# Switch to MetaMask extension's tab.
self.driver.switch_to.window(self.driver.window_handles[0])
# Refresh MetaMask extension's tab while extension is not loaded.
while True:
# If page is fully loaded.
if 'initialize' in self.driver.current_url:
break
self.driver.refresh() # Reload page.
sleep(1) # Wait 1 second.
# Click on "Start" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/div/button')
# Click on "Import wallet" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/div/'
'div[2]/div/div[2]/div[1]/button')
# Click on "I agree" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/div/'
'div/div[5]/div[1]/footer/button[2]')
# Input recovery phrase.
self.element_send_keys('//*[@id="app-content"]/div/div[3]/div/div/'
'form/div[4]''/div[1]/div/input',
self.recovery_phrase)
# Input new password.
self.element_send_keys('//*[@id="password"]', self.password)
self.element_send_keys('//*[@id="confirm-password"]', self.password)
# Check "I have read and agree to the..." checkbox.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/form/div[7]/div')
# Click on "Import" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/form/button')
# Click on "All done" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div/button')
print(f'{green}Logged to Metamask extension.{reset}')
def opensea_login(self) -> None:
"""Login to Opensea using Metamask."""
print('Login to Opensea.', end=' ')
self.driver.switch_to.window(self.driver.window_handles[1]) \
if self.window_handles(1) else self.retry_login(0)
self.driver.get(self.login_url) # Go to Opensea login URL.
# Click on "Metamask" button in list of wallet.
ul = len(self.element_visible(
'//*[@id="__next"]/div[1]/main/div/div/div/div[2]/ul'
).find_elements_by_tag_name('li'))
for li in range(ul):
li += 1 # Add 1 to start li element at li[1].
# Check if button text contains "MetaMask".
if self.element_visible(
'//*[@id="__next"]/div[1]/main/div/div/div/div[2]/ul/li'
f'[{li}]/button/div[2]/span').text == 'MetaMask':
# Click on Metamask button.
self.element_clickable('//*[@id="__next"]/div[1]/main/div/div'
f'/div/div[2]/ul/li[{li}]/button')
break
# Switch on MetaMask popup tab.
self.driver.switch_to.window(self.driver.window_handles[2]) \
if self.window_handles(2) else self.retry_login(0)
# Click on "Next" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/'
'div[2]/div[4]/div[2]/button[2]')
# Click on "Connect" button.
self.element_clickable('//*[@id="app-content"]/div/div[3]/div/'
'div[2]/div[2]/div[2]/footer/button[2]')
self.metamask_sign()
# Reload page and retry to log in to Opensea if failed.
try:
WDW(self.driver, 10).until(EC.url_to_be(self.create_url))
print(f'{green}Logged to Opensea.{reset}\n')
except TE:
self.retry_login()
def metamask_sign(self) -> None:
"""Metamask confirm connection."""
# Switch on MetaMask popup tab.
self.driver.switch_to.window(self.driver.window_handles[2]) \
if self.window_handles(2) else self.retry_login(0)
# Click on "Sign" button.
self.element_clickable(
'//*[@id="app-content"]/div/div[3]/div/div[3]/button[2]')
# Switch back to Opensea tab.
self.driver.switch_to.window(self.driver.window_handles[1]) \
if self.window_handles(1) else self.retry_login(0)
def retry_login(self, value: int = 1) -> None:
"""Retry to log in to Opensea after an error occured."""
print(f'{red}Failed to login to Opensea, Retrying.{reset}')
if value == 0:
self.opensea_login()
else:
self.driver.get(self.create_url)
self.metamask_sign()
def opensea_upload(self, number: int) -> None:
"""Upload multiple NFTs automatically on Opensea."""
try:
print(f'Uploading {settings.nft_name}/{len(settings.file)}.',
end=' ')
# Go to Opensea login URL.
self.driver.get(self.create_url + '?enable_supply=true')
# Upload NFT file.
if not os.path.exists(settings.file_path) \
or settings.file_path == '':
raise TE('File doesn\'t exist.')
self.element_send_keys('//*[@id="media"]', settings.file_path)
# Input NFT name.
if settings.nft_name == '':
raise TE('Missing NFT Name.')
self.element_send_keys('//*[@id="name"]', settings.nft_name)
# Input external link.
if settings.external_link != '':
self.element_send_keys(
'//*[@id="external_link"]', settings.external_link)
# Input description.
if settings.description != '':
self.element_send_keys(
'//*[@id="description"]', settings.description)
# Input collection and select it.
if settings.collection != '':
self.element_send_keys(
'//*[@id="__next"]/div[1]/main/div/div/section/div/form/'
'div[5]/div/div[2]/input', settings.collection)
try:
sleep(2)
self.element_clickable(
'//*[contains(@id, "tippy")]/div/div/div/ul/li/button')
except Exception:
raise TE('Collection doesn\'t exist')
# Add properties, levels and stats.
parameters = [settings.properties, settings.levels, settings.stats]
for index in range(3):
if len(parameters[index]) > 0:
# Change element from list of string to list of list.
# https://github.com/maximedrn/opensea_automatic_uploader/issues/1
if type(parameters[index][0]) != list:
parameters[index] = [parameters[index]]
# Click on "+" button for properties, levels and stats.
self.element_clickable(
'//*[@id="__next"]/div[1]/main/div/div/section/div/'
f'form/section/div[{index + 1}]/div/div[2]/button')
parameter = 0
for element in parameters[index]:
# If there are more than 1 element.
if parameter > 0:
# Click on "Add more" button.
self.element_clickable(
f'/html/body/div[{index + 2}]/div/div/div/'
'section/button')
parameter += 1
self.element_send_keys(
f'/html/body/div[{index + 2}]/div/div/div/section/'
f'table/tbody/tr[{parameter}]/td[1]/div/div/input',
element[0])
if len(element) == 3:
actual_element = (
f'/html/body/div[{index + 2}]/div/div/div/'
f'section/table/tbody/tr[{parameter}]/td[3]'
'/div/div/input')
self.clear_text(actual_element)
self.element_send_keys(actual_element, element[2])
actual_element = (
f'/html/body/div[{index + 2}]/div/div/div/section/'
f'table/tbody/tr[{parameter}]/td[2]/div/div/input')
self.clear_text(actual_element)
self.element_send_keys(actual_element, element[1])
# Click on "Save" button.
self.element_clickable(f'/html/body/div[{index + 2}]/div'
'/div/div/footer/button')
# Click on "Unlockable Content" switch if true.
if settings.unlockable_content != '':
if len(settings.unlockable_content) > 0:
if settings.unlockable_content[0]:
self.element_send_keys(
'//*[@id="unlockable-content-toggle"]', Keys.ENTER)
# Send text content.
self.element_send_keys(
'//*[@id="__next"]/div[1]/main/div/div/section/div'
'/form/section/div[4]/div[2]/textarea',
settings.unlockable_content[1])
# Click on "Explicit & Sensitive Content" switch if true.
if settings.explicit_and_sensitive_content != '':
if settings.explicit_and_sensitive_content:
self.element_send_keys(
'//*[@id="explicit-content-toggle"]', Keys.ENTER)
# Set Blockchain.
if settings.blockchain != '':
blockchain = self.element_visible('//*[@id="chain"]')
if blockchain.get_attribute('value') != settings.blockchain:
# Click on bottom sheet.
self.element_clickable(
'//*[@id="__next"]/div[1]/main/div/div'
'/section/div/form/div[7]/div/div[2]')
                    # Get length of elements list.
ul = len(self.element_visible(
'//*[@id="tippy-9"]/div/div/div/ul'
).find_elements_by_tag_name('li'))
# Find Blockchain in list.
for li in range(ul):
li += 1 # Add 1 to start li element at li[1].
# Check if span text contains Blockchain.
if self.element_visible(
f'//*[@id="tippy-9"]/div/div/div/ul/li[{li}]'
'/button/div[2]/span[1]').text \
== settings.blockchain:
# Click on specific Blockchain button.
self.element_clickable('//*[@id="tippy-9"]/div/div'
f'/div/ul/li[{li}]/button')
break
sleep(2)
# Set number of supply.
if settings.supply != "" and type(settings.supply) == int:
if (
"?enable_supply=true" in self.driver.current_url
and settings.supply > 1
):
# Set supply modifying value.
self.driver.execute_script(
f'arguments[0].value = "";',
self.element_visible('//*[@id="supply"]'))
self.element_send_keys(
'//*[@id="supply"]', settings.supply)
sleep(2)
# Click on "Create" button.
self.element_clickable('//*[@id="__next"]/div[1]/main/div/div/'
'section/div/form/div/div[1]/span/button')
sleep(10)
# Check if done.
self.element_visible('/html/body/div[5]/div/div/div/div[1]', 10)
print(f'{green}Done.{reset}')
# If price has been defined.
if settings.price > 0:
self.sell_nft() # Sell NFT.
else:
print(f'{red}NFT sale cancelled.{reset}')
except TE as error:
print(f'{red}Failed: {error}{reset}')
def sell_nft(self) -> None:
"""Set a price for the NFT, etc."""
try:
# Get sell page for the NFT.
self.driver.get(self.driver.current_url + '/sell')
if settings.supply > 1 and \
settings.blockchain.lower() == 'polygon':
# Input number of supplies to sell.
if settings.quantity <= settings.supply:
self.driver.execute_script(
f'arguments[0].value = "";',
self.element_visible('//*[@id="quantity"]'))
self.element_send_keys(
'//*[@id="quantity"]', str(settings.quantity))
else:
raise TE('Quantity must be less or equal to supply.')
if settings.supply == 1 and \
settings.blockchain.lower() != 'polygon':
# Input Ethereum price.
self.element_send_keys(
'//*[@id="__next"]/div[1]/main/div/div/div[3]/div/div'
'[2]/div/div[1]/form/div[2]/div/div[2]/div/div/div[2]'
'/input', str(settings.price))
else:
# Input price.
self.element_send_keys(
'//input[@name="price"]', str(settings.price))
# Click on "Complete listing" button.
try:
self.element_clickable('//button[@type="submit"]')
except Exception:
                raise TE('An error occurred. Submit button can\'t be clicked')
# Click on "Create" button.
try:
self.element_clickable('//*[@class="ActionPanel--content"]/button')
except Exception:
                raise TE('An error occurred. Sell button can\'t be clicked')
WDW(webdriver, timeout=1)
# Sign Metamask
self.metamask_sign()
WDW(webdriver, timeout=5)
# click 'x' icon to close the popup and bring up the page for the NFT that was just listed
self.element_clickable('//button[@class="UnstyledButtonreact__UnstyledButton-sc-ty1bh0-0 btgkrL"]')
WDW(webdriver, timeout=7)
print(f'{green}NFT put up for sale.{reset}')
except TE as error:
print(f'{red}NFT sale cancelled: {error}{reset}')
def cls() -> None:
"""Clear console function."""
# Clear console for Windows using 'cls' and Linux & Mac using 'clear'.
os.system('cls' if os.name == 'nt' else 'clear')
def read_file(file_: str, question: str) -> str:
"""Read file or ask for data to write in text file."""
if not os.path.isfile(f'assets/{file_}.txt'):
open(f'assets/{file_}.txt', 'a')
with open(f'assets/{file_}.txt', 'r+', encoding='utf-8') as file:
text = file.read()
if text == '':
text = input(question)
if input(f'Do you want to save your {file_} in'
' text file? (y/n) ').lower() == 'y':
file.write(text)
print(f'{green}Saved.{reset}')
else:
print(f'{yellow}Not saved.{reset}')
return text
def data_file() -> str:
"""Read data folder and extract JSON, CSV and XLSX files."""
while True:
folder = [glob(f'data/{extension}')
for extension in ['*.json', '*.csv', '*.xlsx']]
print(f'{yellow}\nChoose your file:{reset}')
file_number = 0
files = []
print('0 - Browse file on PC.')
for extension in folder:
for file in extension:
file_number += 1
files.append(file)
print(f'{file_number} - {file}')
answer = input('File number: ')
cls() # Clear console.
if answer.isdigit():
if int(answer) == 0:
# Browse file on PC.
from tkinter import Tk
from tkinter.filedialog import askopenfilename
Tk().withdraw() # Hide Tkinter tab.
return askopenfilename(filetypes=[('', '.json .csv .xlsx')])
elif int(answer) <= len(files):
return files[int(answer) - 1]
else:
print(f'{red}File doesn\'t exist.{reset}')
else:
print(f'{red}Answer must be an integer.{reset}')
if __name__ == '__main__':
cls() # Clear console.
password = read_file('password', '\nWhat is your MetaMask password? ')
recovery_phrase = read_file('recovery_phrase',
'\nWhat is your MetaMask recovery phrase? ')
file = data_file() # Ask for file.
# Init Settings class.
settings = Settings(file, os.path.splitext(file)[1])
# Init Opensea class and send password and recovery phrase.
opensea = Opensea(password, recovery_phrase)
opensea.metamask() # Connect to MetaMask.
opensea.opensea_login() # Connect to Opensea.
# Upload each NFT one by one.
for element in range(settings.len_file):
settings.get_nft(element) # Get data of the NFT.
opensea.opensea_upload(element + 1) # Upload it.
|
py | 1a4910a941b96cacebdc1000d139a97587c376b5 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import os
import numpy as np
from pandapower.auxiliary import ppException
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
class MapboxTokenMissing(ppException):
"""
Exception being raised in case loadflow did not converge.
"""
pass
def _on_map_test(x, y):
"""
checks if bus_geodata can be located on a map using geopy
"""
try:
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
geolocator = Nominatim(user_agent="pandapower_user_mapboxplot")
except ImportError:
# if geopy is not available there will be no geo-coordinates check
# therefore if geo-coordinates are not real and user sets on_map=True, an empty map will be plot!
        logger.warning('Geo-coordinates check cannot be performed because geopy package not available \n\t--> '
'if geo-coordinates are not in lat/lon format an empty plot may appear...')
return True
try:
location = geolocator.reverse("{0}, {1}".format(x, y), language='en-US')
except GeocoderTimedOut:
logger.Error("Existing net geodata cannot be geo-located: possible reason: geo-data not in lat/long ->"
"try geo_data_to_latlong(net, projection) to transform geodata to lat/long!")
if location.address is None:
return False
else:
return True
def geo_data_to_latlong(net, projection):
"""
Transforms network's geodata (in `net.bus_geodata` and `net.line_geodata`) from specified projection to lat/long (WGS84).
INPUT:
**net** (pandapowerNet) - The pandapower network
**projection** (String) - projection from which geodata are transformed to lat/long. some examples
- "epsg:31467" - 3-degree Gauss-Kruger zone 3
- "epsg:2032" - NAD27(CGQ77) / UTM zone 18N
- "epsg:2190" - Azores Oriental 1940 / UTM zone 26N
"""
try:
from pyproj import Proj, transform
except ImportError:
        logger.warning('Geo-coordinates check cannot be performed because pyproj package not available \n\t--> '
'if geo-coordinates are not in lat/lon format an empty plot may appear...')
return
if projection == 'epsg:4326':
return
wgs84 = Proj(init='epsg:4326') # lat/long
try:
projection = Proj(init=projection)
except:
logger.warning("Transformation of geodata to lat/long failed! because of:]\n"
"Unknown projection provided "
"(format 'epsg:<number>' required as available at http://spatialreference.org/ref/epsg/ )")
return
# transform all geodata to long/lat using set or found projection
try:
lon, lat = transform(projection, wgs84, net.bus_geodata.loc[:, 'x'].values, net.bus_geodata.loc[:, 'y'].values)
net.bus_geodata.loc[:, 'x'], net.bus_geodata.loc[:, 'y'] = lon, lat
if net.line_geodata.shape[0] > 0:
for idx in net.line_geodata.index:
line_coo = np.array(net.line_geodata.loc[idx, 'coords'])
lon, lat = transform(projection, wgs84, line_coo[:, 0], line_coo[:, 1])
net.line_geodata.loc[idx, 'coords'] = np.array([lon, lat]).T.tolist()
return
except:
logger.warning('Transformation of geodata to lat/long failed!')
return
def set_mapbox_token(token):
from pandapower import pp_dir
path = os.path.join(pp_dir, "plotting", "plotly")
filename = os.path.join(path, 'mapbox_token.txt')
with open(filename, "w") as mapbox_file:
mapbox_file.write(token)
def _get_mapbox_token():
from pandapower import pp_dir
path = os.path.join(pp_dir, "plotting", "plotly")
filename = os.path.join(path, 'mapbox_token.txt')
with open(filename, "r") as mapbox_file:
return mapbox_file.read()
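if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): load one of pandapower's
    # example networks, convert its Gauss-Kruger geodata to lat/long for
    # plotly/Mapbox plotting, and optionally register a token. The example
    # network, projection and token value are assumptions for demonstration.
    import pandapower.networks as pn

    demo_net = pn.mv_oberrhein()  # example net that ships with bus geodata
    geo_data_to_latlong(demo_net, projection="epsg:31467")  # Gauss-Kruger zone 3
    print(demo_net.bus_geodata.head())
    # set_mapbox_token("pk.<your-mapbox-token>")  # uncomment with a real token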
|
py | 1a491109b4f79e4d5d7d8d5beb90365054e4af62 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
from setuptools import setup, find_packages
import os
import imp
def non_python_files(path):
""" Return all non-python-file filenames in path """
result = []
all_results = []
module_suffixes = [info[0] for info in imp.get_suffixes()]
ignore_dirs = ['cvs']
for item in os.listdir(path):
name = os.path.join(path, item)
if (
os.path.isfile(name) and
os.path.splitext(item)[1] not in module_suffixes
):
result.append(name)
elif os.path.isdir(name) and item.lower() not in ignore_dirs:
all_results.extend(non_python_files(name))
if result:
all_results.append((path, result))
return all_results
data_files = (
# non_python_files('emissary') +
# non_python_files(os.path.join('Emissary', 'doc'))
)
setup(name='Emissary',
version="2.1.1",
description='A microservice for indexing the plain text of articles and essays',
author='Luke Brooks',
author_email='[email protected]',
url='http://psybernetics.org.uk/emissary',
download_url = 'https://github.com/LukeB42/Emissary/tarball/2.0.0',
data_files = data_files,
packages=['emissary', 'emissary.resources', 'emissary.controllers'],
include_package_data=True,
install_requires=[
"setproctitle",
"goose-extractor",
"lxml",
"gevent",
"Flask-RESTful",
"Flask-SQLAlchemy",
"cssselect",
"BeautifulSoup",
"feedparser",
"python-snappy",
"requests",
"pygments",
"window",
],
keywords=["text extraction","document archival","document retrieval"]
)
|
py | 1a49114e216f9eb185f1e2530e25af136120ee91 | import math
from functools import reduce  # reduce lives in functools on Python 3; used by vector_sum below
#in production you want to use numpy since plain python lists are slow
#matrix operations
def shape(A):
num_rows = len(A)
num_cols = len(A[0])
return num_rows,num_cols
def make_matrix(num_rows,num_cols,entry_fn):
return [[entry_fn(i,j)
             for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i,j):
return 1 if i == j else 0
def all_zero(i,j):
return 0
def get_column(A,j):
    return [A_i[j] for A_i in A]
#vectors
def vector_add(v,w):
return [v_i + w_i for v_i,w_i in zip(v,w)]
def vector_sum(vectors):
result = vectors[0]
for vector in vectors[1:]:
result = vector_add(result,vector)
return result
def vector_sum(vectors):
    # equivalent one-liner; note this overrides the loop-based version above
    return reduce(vector_add,vectors)
def scalar_multiply(c,v):
return [c * v_i for v_i in v]
def vector_mean(vectors):
n = len(vectors)
return scalar_multiply(1/n,vector_sum(vectors))
def dot(v,w):
return sum(v_i * w_i for v_i,w_i in zip(v,w))
def sum_of_squares(v):
return dot(v,v)
def magnitude(v):
return math.sqrt(sum_of_squares(v))
def vector_subtract(v,w):
    return [v_i - w_i for v_i,w_i in zip(v,w)]
def squared_distance(v,w):
    return sum_of_squares(vector_subtract(v,w))
def distance(v,w):
return math.sqrt(squared_distance(v,w))
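# Quick usage sketch (added for illustration, not part of the original notes):
# exercises the helpers above on tiny inputs and prints the expected values.
if __name__ == "__main__":
    identity = make_matrix(3, 3, is_diagonal)    # 3x3 identity matrix
    print(shape(identity))                       # (3, 3)
    print(get_column(identity, 1))               # [0, 1, 0]
    print(vector_add([1, 2, 3], [4, 5, 6]))      # [5, 7, 9]
    print(dot([1, 2, 3], [4, 5, 6]))             # 32
    print(distance([0, 0], [3, 4]))              # 5.0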
|
py | 1a4914666534e0df33b85e522336cf9cfb8da7d7 |
from sklearn.pipeline import FeatureUnion, Pipeline
import pandas as pd
import numpy as np
from typing import List,Tuple
def extract_feature_names(model, name) -> List[str]:
"""Extracts the feature names from arbitrary sklearn models
Args:
model: The Sklearn model, transformer, clustering algorithm, etc. which we want to get named features for.
name: The name of the current step in the pipeline we are at.
Returns:
The list of feature names. If the model does not have named features it constructs feature names
by appending an index to the provided name.
"""
if hasattr(model, "get_feature_names"):
return model.get_feature_names()
elif hasattr(model, "n_clusters"):
return [f"{name}_{x}" for x in range(model.n_clusters)]
elif hasattr(model, "n_components"):
return [f"{name}_{x}" for x in range(model.n_components)]
elif hasattr(model, "components_"):
n_components = model.components_.shape[0]
return [f"{name}_{x}" for x in range(n_components)]
elif hasattr(model, "classes_"):
        return list(model.classes_)
else:
return [name] |
py | 1a4914741250fd215a30dadded8acd09c42b5cca | """
This tests the filter functions to ensure that they are
appropriately calculating the filters as expected.
These filter tests operate on the principle that the product of
single power prime integers is always unique, and by extension,
so are their logarithms. Prime number arrays are filtered,
multiplied together, and compared against an expected hard-coded
result.
"""
import numpy as np
import numpy.ma as np_ma
import pytest
import sympy as sy
import math
import ifa_smeargle.core as core
import ifa_smeargle.masking as mask
import ifa_smeargle.testing as test
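# Illustrative aside (not one of the tests): the uniqueness argument from the
# module docstring in miniature. Because prime factorizations are unique, the
# product of whichever primes survive a filter -- and hence its logarithm --
# identifies exactly which elements were kept. Only numpy and math from the
# imports above are used; the prime values are arbitrary.
def _demo_prime_product_uniqueness():
    primes = np.array([2, 3, 5, 7, 11, 13])
    kept_by_filter_a = primes[::2]   # pretend one filter kept [2, 5, 11]
    kept_by_filter_b = primes[1::2]  # a different filter kept [3, 7, 13]
    log_a = float(np.sum(np.log10(kept_by_filter_a)))
    log_b = float(np.sum(np.log10(kept_by_filter_b)))
    # Different surviving subsets can never share the same product/logarithm.
    assert not math.isclose(log_a, log_b)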
def test_filter_sigma_value():
""" This tests the filtering of sigma boundaries."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(10,10), index=50)
# Prescribed filtering parameters
# 1 Sigma
sigma_multiple = 1
sigma_iterations = 2
# Create the filter.
test_filter = mask.filter_sigma_value(data_array=test_array,
sigma_multiple=sigma_multiple,
sigma_iterations=sigma_iterations)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '92.7429789714003440708375243748487223136051046'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
def test_filter_percent_truncation():
""" This tests the filtering of percent truncations."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(7,7))
# Prescribed filtering parameters
# The top 35% and bottom 10%.
top_percent = 0.35
bottom_percent = 0.10
# Create the filter.
test_filter = mask.filter_percent_truncation(
data_array=test_array, top_percent=top_percent,
bottom_percent=bottom_percent)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '48.3986809684295405908025212823332315778806862'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
def test_filter_pixel_truncation():
""" This tests the filtering of pixel boundaries."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(7,7))
# Prescribed filtering parameters
# Top 13 pixels and bottom 9.
top_count = 13
bottom_count = 9
# Create the filter.
test_filter = mask.filter_pixel_truncation(data_array=test_array,
top_count=top_count,
bottom_count=bottom_count)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '51.0043131557317283360473320982116998982267737'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
def test_filter_maximum_value():
""" This tests the filtering of values above a maximum."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(7,7))
# Prescribed filtering parameters
# The value 113 should not be masked.
maximum_value = 113
# Create the filter.
test_filter = mask.filter_maximum_value(data_array=test_array,
maximum_value=maximum_value)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '46.4998252465517387337527237516559582272076600'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
def test_filter_minimum_value():
""" This tests the filtering of values below a minimum."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(7,7))
# Prescribed filtering parameters.
# The value 101 itself should not be masked.
minimum_value = 101
# Create the filter.
test_filter = mask.filter_minimum_value(data_array=test_array,
minimum_value=minimum_value)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '52.5579255086291590806495158287835916351211866'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
def test_filter_exact_value():
""" This tests the filtering of exact values."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(7,7))
# Prescribed filtering parameters
exact_value = 101
# Create the filter.
test_filter = mask.filter_exact_value(data_array=test_array,
exact_value=exact_value)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '86.9163820638011874618505104537286754939523446'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
def test_filter_invalid_value():
""" This tests the filtering of invalid values."""
# Creating the testing array.
test_array = test.base.create_prime_test_array(shape=(7,7))
# We need to force invalid values as the prime test creation
# does not have them.
test_array = np.array(test_array,dtype=float)
test_array[1:3,2] = np.inf
test_array[2,4:6] = -np.inf
test_array[5,1:6] = np.nan
# Prescribed filtering parameters
pass
# Create the filter.
test_filter = mask.filter_invalid_value(data_array=test_array)
    # Create a filtered array for both convenience and testing.
test_filtered_array = np_ma.array(test_array, mask=test_filter)
print(test_filtered_array)
# A properly completed filter should have the same product value
# as this number. This is how the filter is checked.
CHECK_STRING = '70.8884174145533646297736729939104459590381610'
CHECK_LOGARITHM = sy.Float(CHECK_STRING)
__, __, product_log10 = core.math.ifas_large_integer_array_product(
integer_array=test_filtered_array.compressed())
# Finally, check. As we are dealing with large single power
# prime composite numbers and long decimals, and the smallest
# factor change of removing the 2 product still changes the
# logarithm enough, checking if the logs are close is good
# enough.
assert_message = ("The check logarithm is: {check} "
"The product logarithm is: {log} "
"The filtered array is: \n {array}"
.format(check=CHECK_LOGARITHM, log=product_log10,
array=test_filtered_array))
assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message
# All done.
return None
|
py | 1a49148e6d2f783e1a1db753b79699dc79b21e9f | import json
import boto3
import sys
from datetime import datetime
from decimal import Decimal
from boto3.dynamodb.conditions import Key, Attr
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
today=datetime.today()
curyear=today.year
curmonth=today.month
curday=today.day
start_of_day = int(datetime(curyear,curmonth,curday,0,0).timestamp())
curtime = int(datetime.today().timestamp())
oneweekbefore = start_of_day - 604800
def lambda_handler(event, context):
dynamodb = boto3.resource('dynamodb', region_name='eu-west-1')
activitytable = dynamodb.Table('BBAthleteActivities')
athlete_id = get_qp(event,'id')
athlete_activities=activitytable.get_item(Key={'id': athlete_id })
#print(athlete_activities)
aa = athlete_activities.get('Item').get('activities')
filtered_activities = list(filter(lambda x: (int(datetime.strptime(x['Date'],'%Y-%m-%d').strftime("%s")) < oneweekbefore), aa))
logger.debug(filtered_activities);
activitytable.update_item(Key={'id':athlete_id} , UpdateExpression='set activities = :obj', ExpressionAttributeValues={":obj" : filtered_activities })
activitytable.update_item(Key={'id':athlete_id} , UpdateExpression='set lastupdated = :obj', ExpressionAttributeValues={":obj" : oneweekbefore })
return {
'statusCode': 200,
'body': json.dumps("Removed last one week activities for athlete : "+ athlete_id)
}
def get_qp(event,qp):
qpid = None
if event.get('params') is not None :
qpid=event.get('params').get('querystring').get(qp)
return qpid
if __name__ == '__main__':
evnt={ "params": { "querystring": { "id": "9671032" } } }
print(lambda_handler(evnt,None))
|
py | 1a4914f9cc6bee82ec85181d88d7229cde3e6681 | #
# @lc app=leetcode.cn id=946 lang=python3
#
# [946] 验证栈序列
#
from typing import List
class Solution:
# 模拟思路即可
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
try:
if len(pushed) == 0 or len(pushed) == 1:
return True
# 模拟的栈
stack = []
for num in popped:
if len(stack) > 0 and num == stack[-1]:
# 恰好要弹出的元素在栈顶
stack.pop()
elif num in pushed:
# 要弹出的元素,还没入栈, 把该元素之前的所有元素入栈
index = pushed.index(num)
stack.extend(pushed[:index])
pushed = pushed[index+1:]
else:
return False
return True
except Exception as e:
raise e
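if __name__ == "__main__":
    # Quick sanity checks using the two examples from the problem statement.
    solution = Solution()
    print(solution.validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]))  # True
    print(solution.validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2]))  # False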
|
py | 1a491620208a2fabafb35394a390b5a3c94c81a6 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
from azure.core.exceptions import ClientAuthenticationError
if TYPE_CHECKING:
from typing import Any, Iterable, Optional
class CredentialUnavailableError(ClientAuthenticationError):
"""The credential did not attempt to authenticate because required data or state is unavailable."""
class AuthenticationRequiredError(CredentialUnavailableError):
"""Interactive authentication is required to acquire a token.
This error is raised only by interactive user credentials configured not to automatically prompt for user
interaction as needed. Its properties provide additional information that may be required to authenticate. The
control_interactive_prompts sample demonstrates handling this error by calling a credential's "authenticate"
method.
"""
def __init__(self, scopes, message=None, error_details=None, claims=None, **kwargs):
# type: (Iterable[str], Optional[str], Optional[str], Optional[str], **Any) -> None
self._claims = claims
self._scopes = scopes
self._error_details = error_details
if not message:
message = "Interactive authentication is required to get a token. Call 'authenticate' to begin."
super(AuthenticationRequiredError, self).__init__(message=message, **kwargs)
@property
def scopes(self):
# type: () -> Iterable[str]
"""Scopes requested during the failed authentication"""
return self._scopes
@property
def error_details(self):
# type: () -> Optional[str]
"""Additional authentication error details from Azure Active Directory"""
return self._error_details
@property
def claims(self):
# type: () -> Optional[str]
"""Additional claims required in the next authentication"""
return self._claims
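# Illustrative handling sketch (mirrors the control_interactive_prompts sample
# referenced in the docstring above; the credential type and scope used here
# are assumptions, not part of this module):
#
#   from azure.identity import AuthenticationRequiredError, InteractiveBrowserCredential
#
#   credential = InteractiveBrowserCredential(disable_automatic_authentication=True)
#   scope = "https://graph.microsoft.com/.default"
#   try:
#       token = credential.get_token(scope)
#   except AuthenticationRequiredError as ex:
#       # Prompt the user at a convenient time, then retry the silent request.
#       credential.authenticate(scopes=ex.scopes)
#       token = credential.get_token(scope)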
|
py | 1a491725237a2cf6f85b1f2e8d2357be9ccc2f62 | import json
from flask import Response
from flask import Blueprint
from flask import request
from financespy import Transaction
from financespy import parse_month
from datetime import date
def month_weeks(backend, year, month):
return [
[trans.to_dict() for trans in week.records()]
for week in backend.month(year=year, month=month).weeks()
]
def month_days(backend, year, month):
return [
[trans.to_dict() for trans in day.records()]
for day in backend
.month(year=year, month=month)
.days()
]
def month_day(backend, year, month, day):
return [
trans.to_dict()
for trans in backend
.month(year=year, month=month)
.day(day).records()
]
def transactions_blueprint(backend, name):
transactions = Blueprint(
"_transactions_",
name,
url_prefix="/api/accounts/<account>/transactions")
@transactions.route("/")
    def root(account):
        return "It is working for " + account
@transactions.route("/<int:year>/<month>", methods=("GET",))
def month_all(account, year, month):
result = [
trans.to_dict()
for trans in backend.month(year=year, month=month).records()
]
return Response(
json.dumps(result),
mimetype="application/json"
)
@transactions.route("/<int:year>/<month>/<details>", methods=("GET",))
def month_details(account, year, month, details):
if details == "weeks":
result = month_weeks(backend, year, month)
elif details == "days":
result = month_days(backend, year, month)
else:
result = month_day(backend, year, month, int(details))
return Response(
json.dumps(result),
mimetype="application/json"
)
@transactions.route("/<int:year>/<month>/<int:day>", methods=("PUT",))
def insert_record(account, year, month, day):
payload = request.get_json()
transaction = Transaction(
value=payload["value"],
description=payload["description"],
categories=[]
)
backend.insert_record(
date=date(year=year, month=parse_month(month), day=day),
record=transaction
)
return ('', 204)
return transactions
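# Illustrative wiring sketch (not part of the module): how the blueprint
# factory above might be attached to a Flask application. The `backend`
# object is assumed to be any financespy backend instance; its construction
# is intentionally left out because it depends on the storage in use.
#
#   from flask import Flask
#
#   app = Flask(__name__)
#   backend = ...  # any financespy backend (file-based, in-memory, ...)
#   app.register_blueprint(transactions_blueprint(backend, __name__))
#   app.run(debug=True)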
|
py | 1a491738353f3a35b5b2addc6d9cf230164f4f32 | from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 1b7b8092fcef
Revises: ('bd9a3f60b2c3', 'f64288c5fd6f')
Create Date: 2018-04-19 12:05:20.226456
"""
# revision identifiers, used by Alembic.
revision = '1b7b8092fcef'
down_revision = ('bd9a3f60b2c3', 'f64288c5fd6f')
def upgrade():
pass
def downgrade():
pass
|
py | 1a4918021a21958a9f61bd57db929108e4de006a | """Mock documents, used for integration testing."""
from dataclasses import dataclass, replace
from itertools import chain
from pathlib import Path
from typing import List, Optional, Sequence, Tuple
from bp.document import Document
from bp.entity import Page
from bp.build_document import InputPage, build_document
from bp.geometry import BBox, Interval
from bp.ocr import InputWord
MockWord = Tuple[str, Tuple[float, float], Tuple[float, float]]
def _input_word(mock_word: MockWord) -> InputWord:
bbox = BBox(
Interval(mock_word[1][0], mock_word[1][1]),
Interval(mock_word[2][0], mock_word[2][1]))
return InputWord(bbox, mock_word[0], None, None, None)
@dataclass(frozen=True)
class MockPage:
"""A mock page.
Args:
mock_words: The mock words on the page.
bbox: The bounding box of the mock page.
"""
words: Tuple[InputWord, ...]
bbox: BBox
def mock_doc(pages: Sequence[str],
name: Optional[str] = None) -> Document:
"""A mock doc described as an ASCII drawing.
Args:
pages: Every string represents a page of input. See the test code for
examples.
name: A name for the Document. This is mostly for logging/debugging. It
should usually be fine to use the default.
"""
if not pages:
pages = [""]
mock_pages: List[MockPage] = []
offset = 0.0
for page in pages:
mock_words: List[MockWord] = []
lines = page.split('\n')
for line_no, line in enumerate(lines):
start: Optional[int] = None
for i in range(len(line) + 1):
if i < len(line) and line[i] != ' ':
if start is None:
start = i
if i == len(line) or line[i] == ' ':
if start is not None:
word = line[start:i]
mock_word = (
word,
(start, i),
(line_no, line_no + 1))
mock_words += [mock_word]
start = None
page_width = max(len(line) for line in lines) if lines else 0
mock_pages += [
MockPage(tuple(map(lambda W: _input_word(W), mock_words)),
BBox(Interval(0, page_width),
Interval(0 + offset, len(lines) + offset)))]
offset += len(lines)
assert len(pages) == len(mock_pages)
if name is None:
name = ('---page break---').join(pages)
return build_mock_doc(tuple(mock_pages), name=name)
def build_mock_doc(mock_pages: Tuple[MockPage, ...], name: str) -> Document:
input_pages = tuple(InputPage(Page(mock_page.bbox, index + 1), mock_page.words)
for index, mock_page in enumerate(mock_pages))
return build_document(input_pages, name)
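if __name__ == "__main__":
    # Minimal usage sketch (assumes the surrounding `bp` package is importable):
    # every run of non-space characters in the ASCII drawing becomes an
    # InputWord positioned by its row/column, and each string becomes one page.
    demo = mock_doc(["total due   $12.50",
                     "thank you"])
    print(demo)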
|
py | 1a491832ba2d068576845b56923ff9da48c30b3b | import sys
fp=open(sys.argv[1])
word=0
for data in fp:
l=data.split()
for i in l:
word+=1
print('Total No. of words:',word) |
py | 1a49186f90dd13823fb23851acf24ea27078a662 | """Leetcode 448. Find All Numbers Disappeared in an Array
Easy
URL: https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/
Given an array of integers where 1 <= a[i] <= n (n = size of array),
some elements appear twice and others appear once.
Find all the elements of [1, n] inclusive that do not appear in this array.
Could you do it without extra space and in O(n) runtime?
You may assume the returned list does not count as extra space.
Example:
Input:
[4,3,2,7,8,2,3,1]
Output:
[5,6]
"""
class SolutionDistinctNumsSet(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
Time complexity: O(n).
Space complexity: O(n).
"""
if not nums:
return []
# Use set to collect distinct numbers.
distinct_nums = set()
for num in nums:
distinct_nums.add(num)
# Iterate through nums to collect disappeared numbers.
disappeared_nums = []
for i in range(1, len(nums) + 1):
if i not in distinct_nums:
disappeared_nums.append(i)
return disappeared_nums
class SolutionMarkIdxNumNeg(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
Time complexity: O(n).
Space complexity: O(1).
"""
if not nums:
return []
# Use idx=num-1 to mark appeared by updating num[idx]=-num[idx].
for num in nums:
idx = abs(num) - 1
nums[idx] = -abs(nums[idx])
# Return disappeared numbers which are idx's with positive values.
return [i + 1 for i in range(len(nums)) if nums[i] > 0]
def main():
# Output: [5,6]
nums = [4,3,2,7,8,2,3,1]
    print(SolutionDistinctNumsSet().findDisappearedNumbers(nums))
    print(SolutionMarkIdxNumNeg().findDisappearedNumbers(nums))
if __name__ == '__main__':
main()
|
py | 1a4918916ce18c9f0b909a390c81af9931f2c164 | import numpy as np
import torch
from torch.utils.data import Dataset, TensorDataset, DataLoader
from sklearn.utils import shuffle
class SequenceBucketCollator():
def __init__(self, choose_length, maxlen, sequence_index, length_index, label_index=None):
self.choose_length = choose_length
self.sequence_index = sequence_index
self.length_index = length_index
self.label_index = label_index
self.maxlen = maxlen
def __call__(self, batch):
batch = [torch.stack(x) for x in list(zip(*batch))]
sequences = batch[self.sequence_index]
lengths = batch[self.length_index]
length = self.choose_length(lengths)
mask = torch.arange(start=self.maxlen, end=0, step=-1) < length
padded_sequences = sequences[:, mask]
batch[self.sequence_index] = padded_sequences
if self.label_index is not None:
return [x for i, x in enumerate(batch) if i != self.label_index], batch[self.label_index]
return batch
def make_loader(x_padded, lengths, y, maxlen=236, batch_size=512, is_train=True):
dataset = TensorDataset(x_padded, lengths, torch.tensor(y))
collator = SequenceBucketCollator(lambda length: length.max(),
maxlen=maxlen,
sequence_index=0,
length_index=1,
label_index=2)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=is_train, collate_fn=collator)
return loader
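if __name__ == "__main__":
    # Tiny usage sketch with fake data: 8 integer "sequences" padded to
    # maxlen=10 with random true lengths. Each batch is trimmed by the
    # collator based on the longest sequence it actually contains.
    maxlen = 10
    x_padded = torch.zeros(8, maxlen, dtype=torch.long)
    lengths = torch.randint(low=1, high=maxlen + 1, size=(8,))
    y = np.zeros(8, dtype=np.float32)
    loader = make_loader(x_padded, lengths, y, maxlen=maxlen, batch_size=4)
    for (x_batch, length_batch), y_batch in loader:
        print(x_batch.shape, y_batch.shape)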
|
py | 1a4918eb512805846e7c8534c6feec0b90db694c | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import Callable
import jax
import jax.numpy as jn
import numpy as np
import tensorflow as tf # For data augmentation.
import tensorflow_datasets as tfds
from absl import app, flags
from tqdm import tqdm, trange
import objax
from examples.image_classification.tfdata.data import DataSet
from objax.jaxboard import SummaryWriter, Summary
from objax.util import EasyDict
from objax.zoo import convnet, wide_resnet
FLAGS = flags.FLAGS
def augment(x, shift: int):
y = tf.image.random_flip_left_right(x['image'])
y = tf.pad(y, [[shift] * 2, [shift] * 2, [0] * 2], mode='REFLECT')
return dict(image=tf.image.random_crop(y, tf.shape(x['image'])), label=x['label'])
# We make our own TrainLoop to be reusable
class TrainLoop(objax.Module):
predict: Callable
train_op: Callable
def __init__(self, nclass: int, **kwargs):
self.nclass = nclass
self.params = EasyDict(kwargs)
def train_step(self, summary: Summary, data: dict, progress: np.ndarray):
kv = self.train_op(progress, data['image'].numpy(), data['label'].numpy())
for k, v in kv.items():
if jn.isnan(v):
raise ValueError('NaN, try reducing learning rate', k)
summary.scalar(k, float(v))
def train(self, num_train_epochs: int, train_size: int, train: DataSet, test: DataSet, logdir: str):
checkpoint = objax.io.Checkpoint(logdir, keep_ckpts=5, makedir=True)
start_epoch, last_ckpt = checkpoint.restore(self.vars())
train_iter = iter(train)
progress = np.zeros(jax.local_device_count(), 'f') # for multi-GPU
with SummaryWriter(os.path.join(logdir, 'tb')) as tensorboard:
for epoch in range(start_epoch, num_train_epochs):
with self.vars().replicate():
# Train
summary = Summary()
loop = trange(0, train_size, self.params.batch,
leave=False, unit='img', unit_scale=self.params.batch,
desc='Epoch %d/%d' % (1 + epoch, num_train_epochs))
for step in loop:
progress[:] = (step + (epoch * train_size)) / (num_train_epochs * train_size)
self.train_step(summary, next(train_iter), progress)
# Eval
accuracy, total = 0, 0
for data in tqdm(test, leave=False, desc='Evaluating'):
total += data['image'].shape[0]
preds = np.argmax(self.predict(data['image'].numpy()), axis=1)
accuracy += (preds == data['label'].numpy()).sum()
accuracy /= total
summary.scalar('eval/accuracy', 100 * accuracy)
print('Epoch %04d Loss %.2f Accuracy %.2f' % (epoch + 1, summary['losses/xe'](),
summary['eval/accuracy']()))
tensorboard.write(summary, step=(epoch + 1) * train_size)
checkpoint.save(self.vars(), epoch + 1)
# We inherit from the training loop and define predict and train_op.
class TrainModule(TrainLoop):
def __init__(self, model: Callable, nclass: int, **kwargs):
super().__init__(nclass, **kwargs)
self.model = model(3, nclass)
model_vars = self.model.vars()
self.opt = objax.optimizer.Momentum(model_vars)
self.ema = objax.optimizer.ExponentialMovingAverage(model_vars, momentum=0.999, debias=True)
print(model_vars)
def loss(x, label):
logit = self.model(x, training=True)
loss_wd = 0.5 * sum((v.value ** 2).sum() for k, v in model_vars.items() if k.endswith('.w'))
loss_xe = objax.functional.loss.cross_entropy_logits(logit, label).mean()
return loss_xe + loss_wd * self.params.weight_decay, {'losses/xe': loss_xe, 'losses/wd': loss_wd}
gv = objax.GradValues(loss, model_vars)
def train_op(progress, x, y):
g, v = gv(x, y)
lr = self.params.lr * jn.cos(progress * (7 * jn.pi) / (2 * 8))
self.opt(lr, objax.functional.parallel.pmean(g))
self.ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]})
def predict_op(x):
return objax.functional.softmax(self.model(x, training=False))
self.predict = objax.Parallel(self.ema.replace_vars(predict_op), model_vars + self.ema.vars())
self.train_op = objax.Parallel(train_op, self.vars(), reduce=lambda x: x[0])
def network(arch: str):
if arch == 'cnn32-3-max':
return functools.partial(convnet.ConvNet, scales=3, filters=32, filters_max=1024,
pooling=objax.functional.max_pool_2d)
elif arch == 'cnn32-3-mean':
return functools.partial(convnet.ConvNet, scales=3, filters=32, filters_max=1024,
pooling=objax.functional.average_pool_2d)
elif arch == 'cnn64-3-max':
return functools.partial(convnet.ConvNet, scales=3, filters=64, filters_max=1024,
pooling=objax.functional.max_pool_2d)
elif arch == 'cnn64-3-mean':
return functools.partial(convnet.ConvNet, scales=3, filters=64, filters_max=1024,
pooling=objax.functional.average_pool_2d)
elif arch == 'wrn28-1':
return functools.partial(wide_resnet.WideResNet, depth=28, width=1)
elif arch == 'wrn28-2':
return functools.partial(wide_resnet.WideResNet, depth=28, width=2)
raise ValueError('Architecture not recognized', arch)
def main(argv):
del argv
# In this example we use tensorflow_datasets for loading cifar10, but you can use any dataset library you like.
tf.config.experimental.set_visible_devices([], "GPU")
DATA_DIR = os.path.join(os.environ['HOME'], 'TFDS')
data, info = tfds.load(name='cifar10', split='train', data_dir=DATA_DIR, with_info=True)
train_size = info.splits['train'].num_examples
image_shape = info.features['image'].shape
nclass = info.features['label'].num_classes
train = DataSet.from_tfds(data, image_shape, augment_fn=lambda x: augment(x, 4))
test = DataSet.from_tfds(tfds.load(name='cifar10', split='test', data_dir=DATA_DIR), image_shape)
train = train.cache().shuffle(8192).repeat().parse().augment().batch(FLAGS.batch)
train = train.nchw().one_hot(nclass).prefetch(16)
test = test.cache().parse().batch(FLAGS.batch).nchw().prefetch(16)
del data, info
# Define the network and train_it
loop = TrainModule(network(FLAGS.arch), nclass=nclass,
arch=FLAGS.arch,
lr=FLAGS.lr,
batch=FLAGS.batch,
epochs=FLAGS.epochs,
weight_decay=FLAGS.weight_decay)
logdir = '%s/%s' % (loop.__class__.__name__, '_'.join(sorted('%s_%s' % k for k in loop.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
print(f'Saving to {logdir}')
print(f'Visualize results with:\n tensorboard --logdir {FLAGS.logdir}')
loop.train(FLAGS.epochs, train_size, train, test, logdir)
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ['cnn32-3-max', 'cnn32-3-mean',
'cnn64-3-max', 'cnn64-3-mean',
'wrn28-1', 'wrn28-2'],
'Model architecture.')
flags.DEFINE_float('lr', 0.1, 'Learning rate.')
flags.DEFINE_float('weight_decay', 0.0005, 'Weight decay ratio.')
flags.DEFINE_integer('batch', 256, 'Batch size')
flags.DEFINE_integer('epochs', 1000, 'Training duration in number of epochs.')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
app.run(main)
|
py | 1a491971096f9bb068b0afb7442d832835f0424f | """
This module implements a set of utilities for generating revert datasets from
the command-line. When the mwreverts python package is installed, a
`mwreverts` utility should be available from the
command-line. Run `mwreverts -h` for more information:
mwreverts dump2reverts
++++++++++++++++++++++
.. automodule:: mwreverts.utilities.dump2reverts
:noindex:
mwreverts revdocs2reverts
+++++++++++++++++++++++++
.. automodule:: mwreverts.utilities.revdocs2reverts
:noindex:
"""
from .dump2reverts import dump2reverts
from .revdocs2reverts import revdocs2reverts
__all__ = ['dump2reverts', 'revdocs2reverts']
|
py | 1a4919d48bfadc693508bd7b8e49591771348acf | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
ren1.SetBackground(0,0,0)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300,300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# camera parameters
camera = ren1.GetActiveCamera()
camera.SetPosition(-54.8012,109.471,231.412)
camera.SetFocalPoint(33,33,33)
camera.SetViewUp(0.157687,0.942832,-0.293604)
camera.SetViewAngle(30)
camera.SetClippingRange(124.221,363.827)
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/ironProt_ascii.case")
Contour0 = vtk.vtkContourFilter()
Contour0.SetInputConnection(reader.GetOutputPort())
Contour0.SetValue(0,200)
Contour0.SetComputeScalars(1)
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(Contour0.GetOutputPort())
mapper.SetImmediateModeRendering(1)
mapper.SetScalarRange(0,1)
mapper.SetScalarVisibility(1)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetRepresentationToSurface()
actor.GetProperty().SetInterpolationToGouraud()
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
py | 1a491a1788e5ce37aadc5aecb52102b8af18525e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.core.worker.consumer`."""
import logging
import os
import shutil
import signal
import subprocess
import tempfile
import time
import pytest
import orion.core.io.experiment_builder as experiment_builder
import orion.core.io.resolve_config as resolve_config
import orion.core.utils.backward as backward
import orion.core.worker.consumer as consumer
from orion.core.utils import sigterm_as_interrupt
from orion.core.utils.exceptions import BranchingEvent, MissingResultFile
from orion.core.utils.format_trials import tuple_to_trial
Consumer = consumer.Consumer
@pytest.fixture
def config(exp_config):
"""Return a configuration."""
config = exp_config[0][0]
config["metadata"]["user_args"] = ["--x~uniform(-50, 50)"]
config["metadata"]["VCS"] = resolve_config.infer_versioning_metadata(
config["metadata"]["user_script"]
)
config["name"] = "exp"
config["working_dir"] = "/tmp/orion"
backward.populate_space(config)
config["space"] = config["metadata"]["priors"]
return config
@pytest.mark.usefixtures("storage")
def test_trials_interrupted_sigterm(config, monkeypatch):
"""Check if a trial is set as interrupted when a signal is raised."""
def mock_popen(self, *args, **kwargs):
os.kill(os.getpid(), signal.SIGTERM)
exp = experiment_builder.build(**config)
monkeypatch.setattr(subprocess.Popen, "wait", mock_popen)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial)
con = Consumer(exp)
with pytest.raises(KeyboardInterrupt):
with sigterm_as_interrupt():
con(trial)
shutil.rmtree(trial.working_dir)
@pytest.mark.usefixtures("storage")
def test_trial_working_dir_is_created(config):
"""Check that trial working dir is created."""
exp = experiment_builder.build(**config)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial, status="reserved")
assert not os.path.exists(trial.working_dir)
con = Consumer(exp)
con(trial)
assert os.path.exists(trial.working_dir)
shutil.rmtree(trial.working_dir)
def setup_code_change_mock(config, monkeypatch, ignore_code_changes):
"""Mock create experiment and trials, and infer_versioning_metadata"""
exp = experiment_builder.build(**config)
trial = tuple_to_trial((1.0,), exp.space)
exp.register_trial(trial, status="reserved")
con = Consumer(exp, ignore_code_changes=ignore_code_changes)
def code_changed(user_script):
return dict(
type="git",
is_dirty=True,
HEAD_sha="changed",
active_branch="new_branch",
diff_sha="new_diff",
)
monkeypatch.setattr(consumer, "infer_versioning_metadata", code_changed)
return con, trial
@pytest.mark.usefixtures("storage")
def test_code_changed_evc_disabled(config, monkeypatch, caplog):
"""Check that trial has its working_dir attribute changed."""
con, trial = setup_code_change_mock(config, monkeypatch, ignore_code_changes=True)
with caplog.at_level(logging.WARNING):
con(trial)
assert "Code changed between execution of 2 trials" in caplog.text
shutil.rmtree(trial.working_dir)
@pytest.mark.usefixtures("storage")
def test_code_changed_evc_enabled(config, monkeypatch):
"""Check that trial has its working_dir attribute changed."""
con, trial = setup_code_change_mock(config, monkeypatch, ignore_code_changes=False)
with pytest.raises(BranchingEvent) as exc:
con(trial)
assert exc.match("Code changed between execution of 2 trials")
shutil.rmtree(trial.working_dir)
@pytest.mark.usefixtures("storage")
def test_retrieve_result_nofile(config):
"""Test retrieve result"""
results_file = tempfile.NamedTemporaryFile(
mode="w", prefix="results_", suffix=".log", dir=".", delete=True
)
exp = experiment_builder.build(**config)
con = Consumer(exp)
with pytest.raises(MissingResultFile) as exec:
con.retrieve_results(results_file)
results_file.close()
assert exec.match(r"Cannot parse result file")
|
py | 1a491a7c08860c4d233547f6f43cb476b3901675 | #!/usr/bin/env python
import json
import sys
from run_tests_stats import execute
import optparse
import os
import subprocess
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def parseresults(log_file, plot_data, t, duration):
fp = open(log_file).readlines()
i = 0
plot_data[t] = {}
plot_data[t]['tot_ops'] = []
plot_data[t]['abrt_ratio'] = []
plot_data[t]['ckp_builder_start'] = []
plot_data[t]['writeback_spent'] = []
plot_data[t]['blocking_spent'] = []
plot_data[t]['ckp_quiescence_spent'] = []
plot_data[t]['ckp_scan_spent'] = []
plot_data[t]['ckp_builder_spent'] = []
plot_data[t]['ckp_barrier_spent'] = []
plot_data[t]['max_last_objs'] = []
plot_data[t]['avg_last_objs'] = []
plot_data[t]['ckp_builder_by_wakeup'] = []
for line in fp:
if i <= 1:
i += 1
continue
w = line.split()
if not w:
break
thd = (w[2])
tot_ops = w[3]
plot_data[t]['tot_ops'].append(float(tot_ops)/duration/1000)
abrts = (w[5])
plot_data[t]['abrt_ratio'].append(float(abrts)/(float(abrts)+float(tot_ops)))
ckp_builder_start = (w[6])
writeback_spent = (w[7])
blocking_spent = (w[8])
ckp_quiescence_spent = (w[9])
ckp_scan_spent = (w[10])
ckp_builder_spent = (w[11])
ckp_barrier_spent = (w[12])
max_last_objs = (w[13])
avg_last_objs = (w[14])
ckp_builder_by_wakeup = (w[15])
plot_data[t]['ckp_builder_start'].append(ckp_builder_start)
plot_data[t]['writeback_spent'].append(writeback_spent)
plot_data[t]['blocking_spent'].append(blocking_spent)
plot_data[t]['ckp_quiescence_spent'].append(ckp_quiescence_spent)
plot_data[t]['ckp_scan_spent'].append(ckp_scan_spent)
plot_data[t]['ckp_builder_spent'].append(ckp_builder_spent)
plot_data[t]['ckp_barrier_spent'].append(ckp_barrier_spent)
plot_data[t]['max_last_objs'].append(max_last_objs)
plot_data[t]['avg_last_objs'].append(avg_last_objs)
plot_data[t]['ckp_builder_by_wakeup'].append(ckp_builder_by_wakeup)
#print thd
#print tot_ops
def plotgraph(plot_data, threads, update_rate, data_structure, initial_size, graph_type, final_dir):
fig = plt.figure()
title = data_structure + '_' + graph_type + '_u' + str(update_rate) + '_i' + str(initial_size)
fig.suptitle(title)
ax = fig.add_subplot(111)
for keys in plot_data:
ax.plot(threads, plot_data[keys][graph_type], marker='o', linestyle='-', label = keys )
ax.set_xlabel('threads')
if graph_type == 'tot_ops':
ax.set_ylabel('Ops/us')
else:
ax.set_ylabel('Abort Ratio')
ax.legend(loc = 'upper left')
#plt.show()
fig.savefig(final_dir+title+'.png')
parser = optparse.OptionParser()
parser.add_option("-d", "--dest", default = "temp",
help = "destination folder")
(opts, args) = parser.parse_args()
#Create result directory
result_dir = "./results/" + opts.dest + "/"
try:
os.stat(result_dir)
except:
os.makedirs(result_dir)
#Make benches
status = subprocess.check_output('make clean -C ../src/; make -C ../src/', shell=True)
#Read config files
with open('config.json') as json_data_file:
data = json.load(json_data_file)
for test in data:
if data[test][0]["data_structure"] == "llist":
if data[test][0]["buckets"] != 1:
sys.exit("Buckets should be 1\n");
for ur in data[test][0]["update_rate"]:
final_dir = result_dir + test + "/u" + str(ur) + "/";
try:
os.stat(final_dir)
except:
os.makedirs(final_dir)
plot_data = {}
for t in data[test][0]["alg_type"]:
out_file = final_dir + "__" + t + "_" +data[test][0]["data_structure"] + "_" + str(data[test][0]["initial_size"]) + "_u" + str(ur) + ".txt"
execute(data[test][0]["runs_per_test"], data[test][0]["rlu_max_ws"], data[test][0]["buckets"], data[test][0]["duration"], \
t, ur, data[test][0]["initial_size"], data[test][0]["range_size"], out_file, data[test][0]["threads"])
parseresults(out_file, plot_data, t, data[test][0]["duration"])
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'tot_ops', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'abrt_ratio', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_builder_start', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'writeback_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'blocking_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_quiescence_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_scan_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_builder_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_barrier_spent', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'max_last_objs', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'avg_last_objs', final_dir)
plotgraph(plot_data, data[test][0]["threads"], ur, data[test][0]["data_structure"], data[test][0]["initial_size"], 'ckp_builder_by_wakeup', final_dir)
|
py | 1a491aea037339ecf712a80e17a6ef79daff29cf | """Descriptions of English, used for building language models.
Language models are for understanding what English looks like, for help with
cipher breaking.
* `count_1l.txt`: counts of single letters
* `count_2l.txt`: counts of pairs letters, bigrams
* `count_3l.txt`: counts of triples of letters, triagrams
* `words.txt`: a dictionary of words, used for keyword-based cipher breaking.
These words should only contain characters cointained in
`string.ascii_letters`.
See [`szyfrow/language_models`](../support/language_models.html) for how these files are used.
""" |
py | 1a491c335ab58f57dd8681afa400b215cb9caf73 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Instantiate the postgres component
"""
def test():
# import journal
# journal.debug("postgres.init").active = True
# journal.debug("postgres.execute").active = True
# journal.debug("postgres.connection").active = True
# access the bizbook package
import bizbook
# build a database component
db = bizbook.pg()
# check that we are connected to the right database
assert db.database == 'bizbook'
# tell postgres to shut up
db.execute("SET client_min_messages = warning;")
# build the tables
db.createTable(bizbook.schema.Location)
db.createTable(bizbook.schema.Person)
db.createTable(bizbook.schema.Publisher)
db.createTable(bizbook.schema.Address)
db.createTable(bizbook.schema.ContactMethod)
db.createTable(bizbook.schema.Staff)
db.createTable(bizbook.schema.Book)
db.createTable(bizbook.schema.Author)
db.createTable(bizbook.schema.Editor)
db.createTable(bizbook.schema.Invoice)
db.createTable(bizbook.schema.InvoiceItem)
# and return the component
return db
# main
if __name__ == "__main__":
test()
# end of file
|
py | 1a491c339f986a0009a6052b8fa1dbb045287f49 | import os
import zipfile
from unittest import mock
import pytest
from requests import exceptions as requests_exceptions
from briefcase.commands.create import InvalidSupportPackage
from briefcase.exceptions import NetworkFailure
def test_install_app_support_package(create_command, myapp, tmp_path, support_path):
"A support package can be downloaded and unpacked where it is needed"
# Write a temporary support zip file
support_file = tmp_path / 'out.zip'
with zipfile.ZipFile(support_file, 'w') as support_zip:
support_zip.writestr('internal/file.txt', data='hello world')
# Modify download_url to return the temp zipfile
create_command.download_url = mock.MagicMock(return_value=support_file)
# Install the support package
create_command.install_app_support_package(myapp)
# Confirm the right URL was used
create_command.download_url.assert_called_with(
download_path=create_command.dot_briefcase_path / 'support',
url='https://briefcase-support.org/python?platform=tester&version=3.X',
)
# Confirm that the full path to the support file
# has been unpacked.
assert (support_path / 'internal' / 'file.txt').exists()
def test_install_pinned_app_support_package(create_command, myapp, tmp_path, support_path):
"A pinned support package can be downloaded and unpacked where it is needed"
# Pin the support revision
myapp.support_revision = '42'
# Write a temporary support zip file
support_file = tmp_path / 'out.zip'
with zipfile.ZipFile(support_file, 'w') as support_zip:
support_zip.writestr('internal/file.txt', data='hello world')
# Modify download_url to return the temp zipfile
create_command.download_url = mock.MagicMock(return_value=support_file)
# Install the support package
create_command.install_app_support_package(myapp)
# Confirm the right URL was used
create_command.download_url.assert_called_with(
download_path=create_command.dot_briefcase_path / 'support',
url='https://briefcase-support.org/python?platform=tester&version=3.X&revision=42',
)
# Confirm that the full path to the support file
# has been unpacked.
assert (support_path / 'internal' / 'file.txt').exists()
def test_install_custom_app_support_package_file(create_command, myapp, tmp_path, support_path):
"A custom support package can be specified as a local file"
# Provide an app-specific override of the package URL
myapp.support_package = os.fsdecode(tmp_path / 'custom' / 'support.zip')
# Write a temporary support zip file
support_file = tmp_path / 'custom' / 'support.zip'
support_file.parent.mkdir(parents=True)
with zipfile.ZipFile(support_file, 'w') as support_zip:
support_zip.writestr('internal/file.txt', data='hello world')
# Modify download_url to return the temp zipfile
create_command.download_url = mock.MagicMock()
# Install the support package
create_command.install_app_support_package(myapp)
# There should have been no download attempt,
# as the resource is local.
create_command.download_url.assert_not_called()
# Confirm that the full path to the support file
# has been unpacked.
assert (support_path / 'internal' / 'file.txt').exists()
def test_install_custom_app_support_package_url(create_command, myapp, tmp_path, support_path):
"A custom support package can be specified as URL"
# Provide an app-specific override of the package URL
myapp.support_package = 'https://example.com/custom/support.zip'
# Write a temporary support zip file
support_file = tmp_path / 'out.zip'
with zipfile.ZipFile(support_file, 'w') as support_zip:
support_zip.writestr('internal/file.txt', data='hello world')
# Modify download_url to return the temp zipfile
create_command.download_url = mock.MagicMock(return_value=support_file)
# Install the support package
create_command.install_app_support_package(myapp)
# Confirm the right URL was used
create_command.download_url.assert_called_with(
download_path=create_command.dot_briefcase_path / 'support',
url='https://example.com/custom/support.zip',
)
# Confirm that the full path to the support file
# has been unpacked.
assert (support_path / 'internal' / 'file.txt').exists()
def test_install_pinned_custom_app_support_package_url(create_command, myapp, tmp_path, support_path):
"A custom support package can be specified as URL, and pinned to a revision"
# Pin the support revision
myapp.support_revision = '42'
# Provide an app-specific override of the package URL
myapp.support_package = 'https://example.com/custom/support.zip'
# Write a temporary support zip file
support_file = tmp_path / 'out.zip'
with zipfile.ZipFile(support_file, 'w') as support_zip:
support_zip.writestr('internal/file.txt', data='hello world')
# Modify download_url to return the temp zipfile
create_command.download_url = mock.MagicMock(return_value=support_file)
# Install the support package
create_command.install_app_support_package(myapp)
# Confirm the right URL was used
create_command.download_url.assert_called_with(
download_path=create_command.dot_briefcase_path / 'support',
url='https://example.com/custom/support.zip?revision=42',
)
# Confirm that the full path to the support file
# has been unpacked.
assert (support_path / 'internal' / 'file.txt').exists()
def test_install_pinned_custom_app_support_package_url_with_args(create_command, myapp, tmp_path, support_path):
"A custom support package can be specified as URL with args, and pinned to a revision"
# Pin the support revision
myapp.support_revision = '42'
# Provide an app-specific override of the package URL
myapp.support_package = 'https://example.com/custom/support.zip?cool=Yes'
# Write a temporary support zip file
support_file = tmp_path / 'out.zip'
with zipfile.ZipFile(support_file, 'w') as support_zip:
support_zip.writestr('internal/file.txt', data='hello world')
# Modify download_url to return the temp zipfile
create_command.download_url = mock.MagicMock(return_value=support_file)
# Install the support package
create_command.install_app_support_package(myapp)
# Confirm the right URL was used
create_command.download_url.assert_called_with(
download_path=create_command.dot_briefcase_path / 'support',
url='https://example.com/custom/support.zip?cool=Yes&revision=42',
)
# Confirm that the full path to the support file
# has been unpacked.
assert (support_path / 'internal' / 'file.txt').exists()
def test_offline_install(create_command, myapp, support_path):
"If the computer is offline, an error is raised"
create_command.download_url = mock.MagicMock(
side_effect=requests_exceptions.ConnectionError
)
# Installing while offline raises an error
with pytest.raises(NetworkFailure):
create_command.install_app_support_package(myapp)
def test_invalid_support_package(create_command, myapp, tmp_path, support_path):
"If the support package isn't a valid zipfile, an error is raised"
# Create a support package that isn't a zipfile
support_file = tmp_path / 'out.zip'
with open(support_file, 'w') as bad_support_zip:
bad_support_zip.write("This isn't a zip file")
# Make the download URL return the temp file
create_command.download_url = mock.MagicMock(return_value=support_file)
# Installing the bad support package raises an error
with pytest.raises(InvalidSupportPackage):
create_command.install_app_support_package(myapp)
def test_missing_support_package(create_command, myapp, tmp_path, support_path):
"If the path provided for the support package is bad, an error is raised"
# Set a custom support package that doesn't exist
myapp.support_package = '/path/does/not/exist.zip'
# Installing the bad support package raises an error
with pytest.raises(InvalidSupportPackage):
create_command.install_app_support_package(myapp)
|
py | 1a491c561b6ede86d0aa19627db0eaac09fd451c | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.traits_editor import TraitsEditor
from traits.api import Bool
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.loggable import Loggable
def grouped_name(names, delimiter='-'):
s = names[0]
e = names[-1]
if s != e:
if all([delimiter in x for x in names]):
prev = None
for x in names:
nx = x.split(delimiter)
h, t = delimiter.join(nx[:-1]), nx[-1]
if prev and prev != h:
break
prev = h
else:
s = names[0]
e = names[-1].split(delimiter)[-1]
s = '{} - {}'.format(s, e)
return s
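# Illustrative note (added for clarity; not in the original module): when the
# delimited names share a common prefix, grouped_name collapses them to
# 'first - last_suffix'; otherwise it falls back to 'first - last'. For example:
#   grouped_name(['run-1-1', 'run-1-2', 'run-1-3'])  -> 'run-1-1 - 3'
#   grouped_name(['run-1-1', 'run-2-3'])             -> 'run-1-1 - run-2-3'
#   grouped_name(['run-1-1'])                        -> 'run-1-1'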
try:
class BaseTraitsEditor(TraitsEditor, Loggable):
dirty = Bool(False)
_destroyed = False
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.init_logger()
def prepare_destroy(self):
pass
def destroy(self):
self._destroyed = True
self.prepare_destroy()
super().destroy()
except TypeError:
# documentation auto doc hack
class BaseTraitsEditor:
pass
# ============= EOF =============================================
|
py | 1a491c56af326ffb7063bd34210fba8944095729 | import vertx
from core.event_bus import EventBus
# Our application config - you can maintain it here or alternatively you could
# stick it in a conf.json text file and specify that on the command line when
# starting this verticle
# Configuration for the web server
web_server_conf = {
# Normal web server stuff
'port': 8080,
'host': 'localhost',
'ssl': True,
# Configuration for the event bus client side bridge
# This bridges messages from the client side to the server side event bus
'bridge': True,
# This defines which messages from the client we will let through
# to the server side
'inbound_permitted': [
# Allow calls to login
{
'address': 'vertx.basicauthmanager.login'
},
# Allow calls to get static album data from the persistor
{
'address': 'vertx.mongopersistor',
'match': {
'action': 'find',
'collection': 'albums'
}
},
# And to place orders
{
'address': 'vertx.mongopersistor',
'requires_auth': True, # User must be logged in to let these through
'match': {
'action': 'save',
'collection': 'orders'
}
}
],
# This defines which messages from the server we will let through to the client
'outbound_permitted': [
{}
]
}
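# Illustration only (added for clarity; this is not part of the verticle and
# not how mod-web-server is implemented internally): conceptually, an inbound
# message is let through when some permitted entry has a matching 'address'
# and every key in its optional 'match' dict equals the corresponding field of
# the message body. Auth checks ('requires_auth') are ignored in this sketch.
def _would_bridge_allow(address, body, permitted=web_server_conf['inbound_permitted']):
    for entry in permitted:
        if entry.get('address') != address:
            continue
        match = entry.get('match', {})
        if all(body.get(k) == v for k, v in match.items()):
            return True
    return False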
# And when it's deployed run a script to load it with some reference
# data for the demo
def deploy_handler(err, id):
if err is None:
# Load the static data
import static_data
else:
print 'Failed to deploy %s' % err
# Now we deploy the modules that we need
# Deploy a MongoDB persistor module
vertx.deploy_module('io.vertx~mod-mongo-persistor~2.0.0-final', handler=deploy_handler)
# Deploy an auth manager to handle the authentication
vertx.deploy_module('io.vertx~mod-auth-mgr~2.0.0-final')
# Start the web server, with the config we defined above
vertx.deploy_module('io.vertx~mod-web-server~2.0.0-final', web_server_conf)
|
py | 1a491cd1eade09a73d7dca9585a3e51523701b6b | """M2 for data processing from extracts (COMSOL)"""
# an example of post-processing state file from ParaView
# state file generated using paraview version 5.8.0
# ----------------------------------------------------------------
# setup views used in the visualization
# ----------------------------------------------------------------
# trace generated using paraview version 5.8.0
#
# To ensure correct image size when batch processing, please search
# for and uncomment the line `# renderView*.ViewSize = [*,*]`
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# get the material library
materialLibrary1 = GetMaterialLibrary()
# Create a new 'Render View'
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [972, 755]
renderView1.InteractionMode = '2D'
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.StereoType = 'Crystal Eyes'
renderView1.CameraPosition = [0.0, 0.0, 10000.0]
renderView1.CameraFocalDisk = 1.0
renderView1.CameraParallelScale = 0.7071057629520495
renderView1.BackEnd = 'OSPRay raycaster'
renderView1.OSPRayMaterialLibrary = materialLibrary1
SetActiveView(None)
# ----------------------------------------------------------------
# setup view layouts
# ----------------------------------------------------------------
# create new layout object 'Layout #1'
layout1 = CreateLayout(name='Layout #1')
layout1.AssignView(0, renderView1)
# ----------------------------------------------------------------
# restore active view
SetActiveView(renderView1)
# ----------------------------------------------------------------
# ----------------------------------------------------------------
# setup the data processing pipelines
# ----------------------------------------------------------------
# create a new 'XML Unstructured Grid Reader'
velocityc51vtu = XMLUnstructuredGridReader(FileName="""'D:\\Engineering\\PhD\\Charming\\Publishing\\1_MANUSCRIPT_framework\\A_Manuscript\\5_revision_3\\new_submit\\A_submission\\Supporting_information\\sample_comsol_v.vtu'""")
velocityc51vtu.PointArrayStatus = ['IsoLevel']
# ----------------------------------------------------------------
# setup the visualization in view 'renderView1'
# ----------------------------------------------------------------
# show data from velocityc51vtu
velocityc51vtuDisplay = Show(velocityc51vtu, renderView1, 'UnstructuredGridRepresentation')
# get color transfer function/color map for 'IsoLevel'
isoLevelLUT = GetColorTransferFunction('IsoLevel')
isoLevelLUT.RGBPoints = [2.5612799973882074e-05, 0.231373, 0.298039, 0.752941, 1.04736280639947, 0.865003, 0.865003, 0.865003, 2.094699999998966, 0.705882, 0.0156863, 0.14902]
isoLevelLUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for 'IsoLevel'
isoLevelPWF = GetOpacityTransferFunction('IsoLevel')
isoLevelPWF.Points = [2.5612799973882074e-05, 0.0, 0.5, 0.0, 2.094699999998966, 1.0, 0.5, 0.0]
isoLevelPWF.ScalarRangeInitialized = 1
# trace defaults for the display properties.
velocityc51vtuDisplay.Representation = 'Surface'
velocityc51vtuDisplay.ColorArrayName = ['POINTS', 'IsoLevel']
velocityc51vtuDisplay.LookupTable = isoLevelLUT
velocityc51vtuDisplay.OSPRayScaleArray = 'IsoLevel'
velocityc51vtuDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
velocityc51vtuDisplay.SelectOrientationVectors = 'IsoLevel'
velocityc51vtuDisplay.ScaleFactor = 0.1
velocityc51vtuDisplay.SelectScaleArray = 'IsoLevel'
velocityc51vtuDisplay.GlyphType = 'Arrow'
velocityc51vtuDisplay.GlyphTableIndexArray = 'IsoLevel'
velocityc51vtuDisplay.GaussianRadius = 0.005
velocityc51vtuDisplay.SetScaleArray = ['POINTS', 'IsoLevel']
velocityc51vtuDisplay.ScaleTransferFunction = 'PiecewiseFunction'
velocityc51vtuDisplay.OpacityArray = ['POINTS', 'IsoLevel']
velocityc51vtuDisplay.OpacityTransferFunction = 'PiecewiseFunction'
velocityc51vtuDisplay.DataAxesGrid = 'GridAxesRepresentation'
velocityc51vtuDisplay.PolarAxes = 'PolarAxesRepresentation'
velocityc51vtuDisplay.ScalarOpacityFunction = isoLevelPWF
velocityc51vtuDisplay.ScalarOpacityUnitDistance = 0.03261513739238651
# init the 'PiecewiseFunction' selected for 'OSPRayScaleFunction'
velocityc51vtuDisplay.OSPRayScaleFunction.Points = [0.008018268893823101, 0.0, 0.5, 0.0, 2.3, 1.0, 0.5, 0.0]
# init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
velocityc51vtuDisplay.ScaleTransferFunction.Points = [2.5612799973882074e-05, 0.0, 0.5, 0.0, 2.094699999998966, 1.0, 0.5, 0.0]
# init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
velocityc51vtuDisplay.OpacityTransferFunction.Points = [2.5612799973882074e-05, 0.0, 0.5, 0.0, 2.094699999998966, 1.0, 0.5, 0.0]
# setup the color legend parameters for each legend in this view
# get color legend/bar for isoLevelLUT in view renderView1
isoLevelLUTColorBar = GetScalarBar(isoLevelLUT, renderView1)
isoLevelLUTColorBar.WindowLocation = 'AnyLocation'
isoLevelLUTColorBar.Title = 'IsoLevel'
isoLevelLUTColorBar.ComponentTitle = ''
isoLevelLUTColorBar.ScalarBarLength = 0.7008439897698198
# set color bar visibility
isoLevelLUTColorBar.Visibility = 1
# show color legend
velocityc51vtuDisplay.SetScalarBarVisibility(renderView1, True)
# ----------------------------------------------------------------
# setup color maps and opacity maps used in the visualization
# note: the Get..() functions create a new object, if needed
# ----------------------------------------------------------------
# ----------------------------------------------------------------
# finally, restore active source
SetActiveSource(velocityc51vtu)
# ----------------------------------------------------------------
|
py | 1a491e927e109e2aad5143a96076d20648048994 | # coding:utf-8
# 2019/9/21
import sys
sys.path.append(r"C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier")
import logging
import pickle
import os
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from keras import backend as K
import numpy as np
import tensorflow as tf
from tool import util, ui_MainWindow, ui_ModelAddDialog, ui_ModelAddDialogChild, helpDialog
# import classifier_collection as cc
# import test_image_classifier as tic
from preprocessing import preprocessing_factory
r"""ui标签转换
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_MainWindow.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_MainWindow.ui
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialog.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialog.ui
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialogChild.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\ui_ModelAddDialogChild.ui
pyuic5 -o C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\helpDialog.py C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\tool\helpDialog.ui
my tensorflow install path: C:/Users/Yauno/AppData/Local/conda/conda/envs/tensorflow
qtdesigner install path: C:\Users\Yauno\AppData\Local\conda\conda\envs\tensorflow\Lib\site-packages\pyqt5_tools\Qt\bin
"""
# Logging setup
LOGGER_PATH = r"C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\log"
logger = util.getLogger(LOGGER_PATH)
logger.setLevel(logging.DEBUG) # Set the log level; at INFO level, DEBUG messages are not visible
# Configuration settings
CONFIG_PATH = r"C:\Study\github\Lookoops\MachineLearning\TensorFlow\image-clssifier\GUI\data\conf.txt"
CONF_MODEL_LIST_NAME = "modelList"
DEFAULT_LOAD_DIR = "defaultLoadDir"
PREDICT_MODEL_PATH = "" # Path to the stacking (fusion) model
# MODEL_LIST = cc.getModelList() # Model list
MODEL_LIST = [['vgg_16', 'vgg_16/fc8/squeezed:0', 224], ['inception_v3', 'InceptionV3/Predictions/Reshape_1:0', 299], ['pnasnet_large', 'final_layer/predictions:0', 331], ['resnet_v2_200', 'resnet_v2_200/predictions/Reshape_1:0', 224], ['inception_resnet_v2', 'InceptionResnetV2/Logits/Predictions:0', 299]]
LABEL_MAPPING_PATH = None # Path to the label mapping file
GRAPH_DIR = None # Directory of the frozen graph models
class MyWindow(QMainWindow, ui_MainWindow.Ui_MainWindow):
"""主窗口"""
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
self.predictPic = None
self.graphDir = GRAPH_DIR
self.stackingModelPath = PREDICT_MODEL_PATH
self.gap = 6 # Prediction tolerance (months)
self.labelMapPath = LABEL_MAPPING_PATH
self.picDefaultLoadDir = 'c:\\'
self.mainWindowIcon = "./data/icon.png"
self.initMainWindow()
def initMainWindow(self):
"""主窗口初始化"""
self.setupUi(self)
self.topWidget = QWidget()
self.setWindowIcon(QIcon(self.mainWindowIcon))
self.initComboBox()
self.initData()
self.menubar.triggered[QAction].connect(self.processtrigger) # Menu bar actions
self.loadPic.clicked.connect(self.getFile) # Load picture
self.reset.clicked.connect(self.resetFunc) # Reset button
self.predict.clicked.connect(self.predictFunc) # Predict button
def initComboBox(self):
"""训练模型下拉框初始化, 从设置中读取配置"""
conf = util.getConfig(CONFIG_PATH)
modelList = conf.options(CONF_MODEL_LIST_NAME)
for m in modelList:
curModelPath = conf.get(CONF_MODEL_LIST_NAME, m)
self.comboBox.addItem(m)
def initData(self):
"""初始化数据"""
self.conf = util.getConfig(CONFIG_PATH)
self.picDefaultLoadDir = self.conf.get(DEFAULT_LOAD_DIR, "pic-default-load-dir")
def resetFunc(self):
"""重置操作"""
self.printConsel("[INFO] reset inputs")
self.predictPic = None
self.showPic.setPixmap(QPixmap("")) # 图片重置
def modelAddFunc(self):
"""setting菜单中的添加模型选项,添加模型名称以及模型对应路径"""
self.modelDialog = ModelAddDialog() # 模型添加框
self.modelDialog.open()
qe = QEventLoop()
qe.exec_()
def printConsel(self, message):
"""打印消息到控制台"""
util.recordAndPrint(logger, self.console, message)
def initPdtModel(self):
"""初始化训练模型"""
self.printConsel("[INFO] initialize prediction model.")
self.pdtModel = Prediction(self.graphDir, self.stackingModelPath, self.labelMapPath)
def pdtCheck(self):
"""预测前检查资源加载"""
self.printConsel("[INFO] check resources load")
if self.predictPic == None:
self.printConsel("[ERROR] picture path is not exist, please check the path you input")
return False
return True
def mockPredictFunc(self):
"""测试"""
import random
return os.path.basename(self.predictPic), random.randint(0,30)
def predictFunc(self):
"""预测,使用Stacking继承学习方法直接预测.
后面考虑通过选择其他方法进行预测"""
if not self.pdtCheck():
return
self.printConsel("[INFO] loading predict models.")
# self.initPdtModel()
# picName, picPdt = self.pdtModel.predictSinglePic(self.predictPic)
picName, picPdt = self.mockPredictFunc() # Mock call for testing the UI
self.printConsel("[INFO] picture name: {}, estimate age: {} ± {} month".format(picName, picPdt, self.gap))
def picDefaultLoadFunc(self):
"""图片默认加载目录"""
self.printConsel("[INFO] set picture default load directory.")
self.picDefaultLoadDir = QFileDialog.getExistingDirectory(self, "getExistingDirectory", "./")
self.printConsel("[INFO] set picture default load directory successful, new directory is : {}".format(self.picDefaultLoadDir))
# Save the setting
self.conf.set(DEFAULT_LOAD_DIR, "pic-default-load-dir", self.picDefaultLoadDir)
with open(CONFIG_PATH, 'w') as f:
self.conf.write(f)
def helpFunc(self):
"""帮助界面"""
self.printConsel("[INFO] help")
helpWin = helpWindow()
helpWin.open()
qe = QEventLoop()
qe.exec_()
def getFile(self):
"""加载图片"""
fname, _ = QFileDialog.getOpenFileName(self, 'Open file', self.picDefaultLoadDir, "Image files (*.jpg *.png)")
self.printConsel("[INFO] load picture, source : {}".format(fname))
self.predictPic = fname
self.showPic.setScaledContents(True) # Scale the picture to fit the label
self.showPic.setPixmap(QPixmap(fname))
def processtrigger(self, q):
"""信号槽触发"""
curName = q.text()
if curName == "添加模型":
self.modelAddFunc()
elif curName == "图片默认加载目录":
self.picDefaultLoadFunc()
elif curName == "退出":
self.close()
elif curName == "使用方法":
self.helpFunc()
class ModelAddDialog(QMainWindow, ui_ModelAddDialog.Ui_ModelListView):
"""模型添加弹框"""
def __init__(self):
super(ModelAddDialog, self).__init__()
self.setupUi(self)
self.initData()
self.initOps()
def open(self):
self.show()
def initOps(self):
self.modelAddButton.clicked.connect(self.add)
self.modelDeleteButton.clicked.connect(self.delete)
def initData(self):
"""初始化列表中的数据"""
self.modelListView.clear()
self.conf = util.getConfig(CONFIG_PATH)
modelList = self.conf.options(CONF_MODEL_LIST_NAME)
for m in modelList:
curModelPath = self.conf.get(CONF_MODEL_LIST_NAME, m)
self.modelListView.addItem("{}: '{}'".format(m, curModelPath))
def delete(self):
"""删除列表中的数据"""
for item in self.modelListView.selectedItems():
removeItem = self.modelListView.takeItem(self.modelListView.row(item))
try:
boolean = self.conf.remove_option(CONF_MODEL_LIST_NAME, removeItem.text().split(":")[0])
if boolean:
logger.info("[INFO] remove item: {} successful".format(removeItem.text().split(":")[0]))
self.modelListView.removeItemWidget(removeItem)
with open(CONFIG_PATH, 'w') as f:
self.conf.write(f)
else:
logger.info("[WARNING] remove item:{} fail".format(removeItem.text().split(":")[0]))
except Exception as e:
logger.error("[ERROR] remove item:{} fail, trace: {}".format(removeItem.text().split(":")[0], str(e)))
self.initData()
def add(self):
"""添加模型"""
self.child = ModelChildDialog()
self.child.open()
self.initData()
qe = QEventLoop()
qe.exec_()
class ModelChildDialog(QMainWindow, ui_ModelAddDialogChild.Ui_modelChildDIalog):
"""模型添加的模态框"""
def __init__(self):
super(ModelChildDialog, self).__init__()
self.setupUi(self)
self.modelPath = None
self.initOps()
def initOps(self):
"""初始化信号槽"""
self.modelAddOk.clicked.connect(self.accept)
self.modelAddCancle.clicked.connect(self.cancel)
self.filePathButton.clicked.connect(self.getFile)
def accept(self):
"""确认"""
modelName = self.modelNameInput.text()
conf = util.getConfig(CONFIG_PATH)
# print(modelName, self.modelPath)
if modelName != None and self.modelPath != None:
conf.set(CONF_MODEL_LIST_NAME, modelName, self.modelPath)
with open(CONFIG_PATH, 'w') as f:
conf.write(f)
self.close()
def cancel(self):
"""取消"""
self.close()
def getFile(self):
"""选择节点路径"""
self.modelPath, _ = QFileDialog.getOpenFileName(self, 'Open file', 'c:\\',"model file (*.pb)")
def open(self):
self.show()
class helpWindow(QMainWindow, helpDialog.Ui_Dialog):
"""帮助界面"""
def __init__(self):
super(helpWindow, self).__init__()
self.setupUi(self)
self.helpButtonOk.clicked.connect(self.acceptFunc)
def open(self):
self.show()
def acceptFunc(self):
self.close()
class PredictionHandler(object):
"""预测基类"""
def __init__(self, graphDir=None, stackingModelPath=None):
self.picList = None # List of pictures to predict
self.frozenGraphName = "frozen_graph.pb"
self.graphDir = graphDir
self.stackingModel = None
self.initData(stackingModelPath)
def initData(self, stackingModelPath):
"""初始化数据和模型"""
# self.picList = [] # 初始化图片列表
self.stackingModel = self.loadModel(stackingModelPath)
def loadModel(self, stackingModelPath):
"""加载模型"""
return pickle.load(open(stackingModelPath,'rb'))
def checkEnv(self):
"""预测前检查资源加载"""
if self.picList == None:
assert False, "picture path is empty"
if self.graphDir == None:
assert False, "model graph path is not exist"
if self.stackingModel == None:
assert False, "train model is not initialize"
def preProcess(self, data, alpha = 0.99, isTotal = False):
"""数据均一化"""
m, n = np.shape(data)
ret = np.zeros((m, n))
for i in range(m):
total = np.sum(data[i, :])
maxValue = np.max(data[i, :])
for j in range(n):
if isTotal:
ret[i, j] = data[i, j] / total * alpha
else:
ret[i, j] = [data[i, j], 1][data[i, j] == 0] / maxValue * alpha
return ret
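# Worked example (added for clarity; not in the original source): with the
# default isTotal=False and alpha=0.99, a row [0, 2, 4] has maxValue 4, the
# zero entry is substituted with 1, and the row becomes approximately
# [0.2475, 0.495, 0.99], i.e. every value is scaled into (0, alpha].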
def createGraph(self, sess, modelPath):
"""创建图"""
K.clear_session()
tf.reset_default_graph()
with tf.gfile.FastGFile(modelPath, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def pdtBySingleModel(self, modelPath, modelName, tensorName, picList, picSize):
"""单个模型预测"""
self.createGraph(None, modelPath)
pFn = preprocessing_factory.get_preprocessing(modelName, is_training=False)
pdtOutput = {} # Predictions stored as a dict, e.g. {'2_m-1-1.9.png': prediction}
with tf.Session() as sess:
for picPath in picList:
tensor = sess.graph.get_tensor_by_name(tensorName)
baseName = os.path.basename(picPath)
# Load the image
imgData = tf.gfile.FastGFile(picPath, 'rb').read()
imgData = tf.image.decode_jpeg(imgData, channels=3)
imgData = pFn(imgData, picSize, picSize)
imgData = tf.expand_dims(imgData, 0)
imgData = sess.run(imgData)
try:
prediction = sess.run(tensor, {'input:0': imgData})
prediction = np.squeeze(prediction)
pdtOutput[baseName] = prediction
except Exception as e:
print("[Error] %s" % str(e))
return pdtOutput
def getMeanOfModels(self):
"""获得多个模型预测结果,并返回预测均值"""
pdt = {}
for modelName, tesnorName, picSize in MODEL_LIST:
curModelPdt = {}
modeDir = os.path.join(self.graphDir, modelName) # Directory for this model name
classList = os.listdir(modeDir) # Trained model variants under this model name
for c in classList:
modelPath = os.path.join(modeDir, c, self.frozenGraphName) # Path of the current trained model
tmpPdt = self.pdtBySingleModel(modelPath, modelName, tensorName, self.picList, picSize) # Predict the pictures with a single model
for k, v in tmpPdt.items():
v = v.argmax() # Index with the highest predicted probability
curModelPdt.setdefault(k, []).append(v)
# Mean prediction of the current model per picture
count = len(classList)
for k, v in curModelPdt.items():
curModelPdt[k] = np.mean(v) # Average over this model's variants
# Add this model's predictions to pdt
for k, v in curModelPdt.items():
if k not in pdt:
pdt[k] = [v]
else:
pdt[k].append(v)
picNameList, testFeature = [], []
for k, v in pdt.items():
picNameList.append(k)
testFeature.append(v)
testFeature = np.mat(testFeature)
testFeature = self.preProcess(testFeature)
return picNameList, testFeature
def predicts(self, picPathList):
"""预测多张图片
@param picPathList 路径,列表
"""
self.picList = picPathList
self.checkEnv() # Check that resources are ready
picNameList, testFeature = self.getMeanOfModels()
pdtValue = self.stackingModel.predict(testFeature)
return picNameList, pdtValue
def predictSinglePic(self, picPath):
"""预测单张图片
@param picPath 路径,字符串
"""
return self.predicts([picPath])
class Prediction(PredictionHandler):
"""预测实现类"""
def __init__(self, graphDir=None, stackingModelPath=None, labelMapPath=None):
super(Prediction, self).__init__(graphDir, stackingModelPath)
self.labelMap = None
self.labelMapPath = labelMapPath
self.initLableMap()
def initLableMap(self):
"""初始化标签映射字典"""
self.labelMap = {}
with open(self.labelMapPath, "r") as f:
lines = f.readlines()
for line in lines:
k,v = line.split(" ")
self.labelMap[k] = v
def predictSinglePic(self, picPath):
"""重写父类预测方法"""
picNameList, pdtValue = self.predicts([picPath])
try:
return picNameList[0], self.labelMap[int(pdtValue[0])] # May raise if the label is missing
except:
assert False, "check label map"
if __name__ == '__main__':
app = QApplication(sys.argv)
myWin = MyWindow()
myWin.show()
sys.exit(app.exec_()) |
py | 1a491eceab0a73d69b0d48bb0a36933f5488370d | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cleanup task for cleaning up unneeded testcases."""
import collections
import datetime
import json
import random
from googleapiclient.errors import HttpError
from base import dates
from base import errors
from base import memoize
from base import utils
from chrome import build_info
from crash_analysis import crash_comparer
from crash_analysis import severity_analyzer
from datastore import data_handler
from datastore import data_types
from datastore import ndb_utils
from fuzzing import leak_blacklist
from handlers import base_handler
from libs import handler
from libs import mail
from libs.issue_management import issue_filer
from libs.issue_management import issue_tracker_policy
from libs.issue_management import issue_tracker_utils
from metrics import crash_stats
from metrics import logs
GENERIC_INCORRECT_COMMENT = (
'\n\nIf this is incorrect, please add the {label_text}')
OSS_FUZZ_INCORRECT_COMMENT = ('\n\nIf this is incorrect, please file a bug on '
'https://github.com/google/oss-fuzz/issues/new')
AUTO_CC_LIMIT = 5
TOP_CRASHES_LIMIT = 5
TOP_CRASHES_DAYS_LOOKBEHIND = 7
TOP_CRASHES_MIN_THRESHOLD = 50 * TOP_CRASHES_DAYS_LOOKBEHIND
TOP_CRASHES_IGNORE_CRASH_TYPES = [
'Out-of-memory',
'Stack-overflow',
'Timeout',
]
TOP_CRASHES_IGNORE_CRASH_STATES = ['NULL']
FUZZ_TARGET_UNUSED_THRESHOLD = 15
UNUSED_HEARTBEAT_THRESHOLD = 15
ProjectMap = collections.namedtuple('ProjectMap', 'jobs platforms')
def _get_predator_result_item(testcase, key, default=None):
"""Return the suspected components for a test case."""
predator_result = testcase.get_metadata('predator_result')
if not predator_result:
return default
return predator_result['result'].get(key, default)
def _append_generic_incorrect_comment(comment, policy, issue, suffix):
"""Get the generic incorrect comment."""
wrong_label = policy.label('wrong')
if not wrong_label:
return comment
return comment + GENERIC_INCORRECT_COMMENT.format(
label_text=issue.issue_tracker.label_text(wrong_label)) + suffix
def job_platform_to_real_platform(job_platform):
"""Get real platform from job platform."""
for platform in data_types.PLATFORMS:
if platform in job_platform:
return platform
raise ValueError('Unknown platform: ' + job_platform)
def cleanup_reports_metadata():
"""Delete ReportMetadata for uploaded reports."""
uploaded_reports = ndb_utils.get_all_from_query(
data_types.ReportMetadata.query(
ndb_utils.is_true(data_types.ReportMetadata.is_uploaded)),
keys_only=True)
ndb_utils.delete_multi(uploaded_reports)
def cleanup_testcases_and_issues():
"""Clean up unneeded open testcases and their associated issues."""
jobs = data_handler.get_all_job_type_names()
testcase_keys = ndb_utils.get_all_from_query(
data_types.Testcase.query(
ndb_utils.is_false(data_types.Testcase.triaged)),
keys_only=True)
top_crashes_by_project_and_platform_map = (
get_top_crashes_for_all_projects_and_platforms())
utils.python_gc()
testcases_processed = 0
empty_issue_tracker_policy = issue_tracker_policy.get_empty()
for testcase_key in testcase_keys:
testcase_id = testcase_key.id()
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
logs.log('Processing testcase %d.' % testcase_id)
try:
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(
testcase)
if not policy:
policy = empty_issue_tracker_policy
# Issue updates.
update_os_labels(policy, testcase, issue)
update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map)
update_component_labels(testcase, issue)
update_issue_ccs_from_owners_file(policy, testcase, issue)
update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue)
update_issue_labels_for_flaky_testcase(policy, testcase, issue)
# Testcase marking rules.
mark_duplicate_testcase_as_closed_with_no_issue(testcase)
mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue)
mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue)
# Notification, to be done at end after testcase state is updated from
# previous rules.
notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
notify_issue_if_testcase_is_invalid(policy, testcase, issue)
notify_uploader_when_testcase_is_processed(policy, testcase, issue)
# Mark testcase as triage complete if both testcase and associated issue
# are closed. This also need to be done before the deletion rules.
mark_testcase_as_triaged_if_needed(testcase, issue)
# Testcase deletion rules.
delete_unreproducible_testcase_with_no_issue(testcase)
except Exception:
logs.log_error('Failed to process testcase %d.' % testcase_id)
testcases_processed += 1
if testcases_processed % 100 == 0:
utils.python_gc()
def cleanup_unused_fuzz_targets_and_jobs():
"""Clean up unused FuzzTarget and FuzzTargetJob entities."""
last_run_cutoff = utils.utcnow() - datetime.timedelta(
days=FUZZ_TARGET_UNUSED_THRESHOLD)
unused_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run < last_run_cutoff)
valid_target_jobs = data_types.FuzzTargetJob.query(
data_types.FuzzTargetJob.last_run >= last_run_cutoff)
to_delete = [t.key for t in unused_target_jobs]
valid_fuzz_targets = set(t.fuzz_target_name for t in valid_target_jobs)
for fuzz_target in ndb_utils.get_all_from_model(data_types.FuzzTarget):
if fuzz_target.fully_qualified_name() not in valid_fuzz_targets:
to_delete.append(fuzz_target.key)
ndb_utils.delete_multi(to_delete)
def get_jobs_and_platforms_for_project():
"""Return a map of projects to jobs and platforms map to use for picking top
crashes."""
all_jobs = ndb_utils.get_all_from_model(data_types.Job)
projects_to_jobs_and_platforms = {}
for job in all_jobs:
job_environment = job.get_environment()
# Skip experimental jobs.
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
continue
# Skip custom binary jobs.
if (utils.string_is_true(job_environment.get('CUSTOM_BINARY')) or
job_environment.get('SYSTEM_BINARY_DIR')):
continue
# Skip if explicitly excluded using flag.
if utils.string_is_true(job_environment.get('EXCLUDE_FROM_TOP_CRASHES')):
continue
if job.project not in projects_to_jobs_and_platforms:
projects_to_jobs_and_platforms[job.project] = ProjectMap(set(), set())
projects_to_jobs_and_platforms[job.project].jobs.add(job.name)
projects_to_jobs_and_platforms[job.project].platforms.add(
job_platform_to_real_platform(job.platform))
return projects_to_jobs_and_platforms
@memoize.wrap(memoize.Memcache(12 * 60 * 60))
def _get_crash_occurrence_platforms_from_crash_parameters(
crash_type, crash_state, security_flag, project_name, lookbehind_days):
"""Get platforms from crash stats based on crash parameters."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return []
where_clause = ('crash_type = {crash_type} AND '
'crash_state = {crash_state} AND '
'security_flag = {security_flag} AND '
'project = {project}').format(
crash_type=json.dumps(crash_type),
crash_state=json.dumps(crash_state),
security_flag=json.dumps(security_flag),
project=json.dumps(project_name),
)
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=lookbehind_days,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=1)
platforms = set()
for row in rows:
for group in row['groups']:
platform = group['name'].split(':')[0]
platforms.add(platform.lower())
return platforms
def get_platforms_from_testcase_variants(testcase):
"""Get platforms from crash stats based on crash parameters."""
variant_query = data_types.TestcaseVariant.query(
data_types.TestcaseVariant.testcase_id == testcase.key.id())
platforms = {
variant.platform
for variant in variant_query
if variant.is_similar and variant.platform
}
return platforms
def get_crash_occurrence_platforms(testcase, lookbehind_days=1):
"""Get platforms from crash stats for a testcase."""
return _get_crash_occurrence_platforms_from_crash_parameters(
testcase.crash_type, testcase.crash_state, testcase.security_flag,
testcase.project_name, lookbehind_days)
def get_top_crashes_for_all_projects_and_platforms():
"""Return top crashes for all projects and platforms."""
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return {}
projects_to_jobs_and_platforms = (get_jobs_and_platforms_for_project())
top_crashes_by_project_and_platform_map = {}
for project_name in projects_to_jobs_and_platforms:
top_crashes_by_project_and_platform_map[project_name] = {}
project_map = projects_to_jobs_and_platforms[project_name]
for platform in project_map.platforms:
where_clause = (
'crash_type NOT IN UNNEST(%s) AND '
'crash_state NOT IN UNNEST(%s) AND '
'job_type IN UNNEST(%s) AND '
'platform LIKE %s AND '
'project = %s' % (json.dumps(TOP_CRASHES_IGNORE_CRASH_TYPES),
json.dumps(TOP_CRASHES_IGNORE_CRASH_STATES),
json.dumps(list(project_map.jobs)),
json.dumps(platform.lower() + '%'),
json.dumps(project_name)))
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=TOP_CRASHES_DAYS_LOOKBEHIND,
group_by='platform',
where_clause=where_clause,
group_having_clause='',
sort_by='total_count',
offset=0,
limit=TOP_CRASHES_LIMIT)
if not rows:
continue
top_crashes_by_project_and_platform_map[project_name][platform] = [{
'crashState': row['crashState'],
'crashType': row['crashType'],
'isSecurity': row['isSecurity'],
'totalCount': row['totalCount'],
} for row in rows if row['totalCount'] >= TOP_CRASHES_MIN_THRESHOLD]
return top_crashes_by_project_and_platform_map
def get_top_crash_platforms(testcase, top_crashes_by_project_and_platform_map):
"""Return list of platforms where this testcase is a top crasher."""
if testcase.project_name not in top_crashes_by_project_and_platform_map:
return []
top_crashes_by_platform_map = top_crashes_by_project_and_platform_map[
testcase.project_name]
top_crash_platforms = set()
for platform in list(top_crashes_by_platform_map.keys()):
top_crashes = top_crashes_by_platform_map[platform]
if not top_crashes:
continue
for top_crash in top_crashes:
crash_state_comparer = crash_comparer.CrashComparer(
top_crash['crashState'], testcase.crash_state)
crash_type_comparer = crash_comparer.CrashComparer(
top_crash['crashType'], testcase.crash_type)
if (crash_state_comparer.is_similar() and
top_crash['isSecurity'] == testcase.security_flag and
(top_crash['isSecurity'] or crash_type_comparer.is_similar())):
top_crash_platforms.add(platform.lower())
return sorted(list(top_crash_platforms))
def delete_unreproducible_testcase_with_no_issue(testcase):
"""Delete an unreproducible testcase if it has no associated issue and has
been open for a certain time interval."""
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
# Make sure that the testcase is at least
# |UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE| days old, otherwise it will be
# seen in crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE)):
return
# Make sure that testcase is not seen in crash stats for a certain time
# interval.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE):
return
testcase.key.delete()
logs.log(
'Deleted unreproducible testcase %d with no issue.' % testcase.key.id())
def mark_duplicate_testcase_as_closed_with_no_issue(testcase):
"""Closes a duplicate testcase if it has no associated issue and has been open
for a certain time interval."""
# Make sure that this testcase is a duplicate bug. If not, bail out.
if testcase.status != 'Duplicate':
return
# Make sure that this testcase has no associated bug. If not, bail out.
if testcase.bug_information:
return
# Make sure that testcase has been open for a certain time interval. We do
# a null timestamp check since some older testcases could be missing it.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp, days=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE)):
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed duplicate testcase %d with no issue.' % testcase.key.id())
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
"""Mark an issue as fixed if all of its associated reproducible testcase are
fixed."""
verified_label = policy.label('verified')
if not verified_label:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is closed in a status other than Fixed, like Duplicate, WontFix
# or Archived, we shouldn't change it. Bail out.
if not issue.is_open and issue.status != policy.status('fixed'):
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If the testcase is still open, no work needs to be done. Bail out.
if testcase.open:
return
# FIXME: Find a better solution to skip over reproducible tests that are now
# showing up as flaky (esp. when we are unable to reproduce the crash in the original
# crash revision).
if testcase.fixed == 'NA':
return
# We can only verify fixed issues for reproducible testcases. If the testcase
# is unreproducible, bail out. Exception is if we explicitly marked this as
# fixed.
if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
return
# Make sure that no other testcases associated with this issue are open.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
# As a last check, do the expensive call of actually checking all issue
# comments to make sure we didn't do the verification already and we didn't
# get called out on issue mistriage.
if (issue_tracker_utils.was_label_added(issue, verified_label) or
issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
return
issue.labels.add(verified_label)
comment = 'ClusterFuzz testcase %d is verified as fixed' % testcase.key.id()
fixed_range_url = data_handler.get_fixed_range_url(testcase)
if fixed_range_url:
comment += ' in ' + fixed_range_url
else:
comment += '.'
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy, issue,
' and re-open the issue.')
skip_auto_close = data_handler.get_value_from_job_definition(
testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
if not skip_auto_close:
issue.status = policy.status('verified')
issue.save(new_comment=comment, notify=True)
logs.log('Mark issue %d as verified for fixed testcase %d.' %
(issue.id, testcase.key.id()))
def mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue):
"""Mark an unreproducible testcase as fixed if the associated issue is
closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# Make sure that there is an associated bug and it is in closed state.
if not issue or issue.is_open:
return
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed unreproducible testcase %d with issue closed.' %
testcase.key.id())
def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy, testcase, issue):
"""Closes an unreproducible testcase and its associated issue after a certain
time period."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# Make sure that this testcase is an unreproducible bug. If not, bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that this testcase has an associated bug. If not, bail out.
if not testcase.bug_information:
return
# If this testcase was manually uploaded, don't change issue state as our
# reproduction result might be incorrect.
if testcase.uploader_email:
return
# Make sure that there is an associated bug and it is in open state.
if not issue or not issue.is_open:
return
# Check if there are any reproducible open testcases are associated with
# this bug. If yes, return.
similar_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_testcase:
return
# Make sure that the testcase is at least
# |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE| days old, otherwise it will be
# seen in crash stats anyway.
if (testcase.timestamp and not dates.time_has_expired(
testcase.timestamp,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
# Handle testcase that turned from reproducible to unreproducible. Account
# for the recent progression task run time.
last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
if (last_tested_crash_time and not dates.time_has_expired(
last_tested_crash_time,
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
return
# Make sure that no crash is seen in the deadline period.
if get_crash_occurrence_platforms(
testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
return
# As a last check, do the expensive call of actually checking all issue
# comments to make sure we didn't get called out on issue mistriage.
if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
return
# Close associated issue and testcase.
comment = ('ClusterFuzz testcase %d is flaky and no longer crashes, '
'so closing issue.' % testcase.key.id())
if utils.is_oss_fuzz():
comment += OSS_FUZZ_INCORRECT_COMMENT
else:
comment = _append_generic_incorrect_comment(comment, policy, issue,
' and re-open the issue.')
issue.status = policy.status('wontfix')
issue.save(new_comment=comment, notify=True)
testcase.fixed = 'NA'
testcase.open = False
testcase.put()
logs.log('Closed unreproducible testcase %d and associated issue.' %
testcase.key.id())
def mark_testcase_as_triaged_if_needed(testcase, issue):
"""Mark testcase as triage complete if both testcase and associated issue
are closed."""
# Check if testcase is open. If yes, bail out.
if testcase.open:
return
# Check if there is an associated bug in open state. If yes, bail out.
if issue:
# Get latest issue object to ensure our update went through.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
if issue.is_open:
return
testcase.triaged = True
testcase.put()
def mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue):
"""Mark testcase as closed if the associated issue is closed."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# Make sure we passed our deadline based on issue closed timestamp.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE)):
return
# If the issue has an ignore label, don't close the testcase and bail out.
# This helps to prevent new bugs from getting filed for legit WontFix cases.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log('Closed testcase %d with issue closed.' % testcase.key.id())
def mark_testcase_as_closed_if_job_is_invalid(testcase, jobs):
"""Mark testcase as closed if the associated job type does not exist."""
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check if the testcase job name is in the list of jobs.
if testcase.job_type in jobs:
return
testcase.open = False
testcase.fixed = 'NA'
testcase.put()
logs.log('Closed testcase %d with invalid job.' % testcase.key.id())
def notify_closed_issue_if_testcase_is_open(policy, testcase, issue):
"""Notify closed issue if associated testcase is still open after a certain
time period."""
needs_feedback_label = policy.label('needs_feedback')
if not needs_feedback_label:
return
# If the testcase is already closed, no more work to do.
if not testcase.open:
return
# Check testcase status, so as to skip unreproducible uploads.
if testcase.status not in ['Processed', 'Duplicate']:
return
# If there is no associated issue, then bail out.
if not issue or not testcase.bug_information:
return
# If the issue is still open, no work needs to be done. Bail out.
if issue.is_open:
return
# If we have already passed our deadline based on issue closed timestamp,
# no need to notify. We will close the testcase instead.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time,
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE)):
return
# Check if there is ignore label on issue already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, policy.label('ignore')):
return
# Check if we did add the notification comment already. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, needs_feedback_label):
return
issue.labels.add(needs_feedback_label)
if issue.status in [policy.status('fixed'), policy.status('verified')]:
issue_comment = (
'ClusterFuzz testcase {id} is still reproducing on tip-of-tree build '
'(trunk).\n\nPlease re-test your fix against this testcase and if the '
'fix was incorrect or incomplete, please re-open the bug.'
).format(id=testcase.key.id())
wrong_label = policy.label('wrong')
if wrong_label:
issue_comment += (
(' Otherwise, ignore this notification and add the '
'{label_text}.'
).format(label_text=issue.issue_tracker.label_text(wrong_label)))
else:
# Covers WontFix, Archived cases.
issue_comment = (
'ClusterFuzz testcase {id} is still reproducing on tip-of-tree build '
'(trunk).\n\nIf this testcase was not reproducible locally or '
'unworkable, ignore this notification and we will file another '
'bug soon with hopefully a better and workable testcase.\n\n'.format(
id=testcase.key.id()))
ignore_label = policy.label('ignore')
if ignore_label:
issue_comment += (
'Otherwise, if this is not intended to be fixed (e.g. this is an '
'intentional crash), please add the {label_text} to '
'prevent future bug filing with similar crash stacktrace.'.format(
label_text=issue.issue_tracker.label_text(ignore_label)))
issue.save(new_comment=issue_comment, notify=True)
logs.log('Notified closed issue for open testcase %d.' % testcase.key.id())
def notify_issue_if_testcase_is_invalid(policy, testcase, issue):
"""Leave comments on associated issues when test cases are no longer valid."""
invalid_fuzzer_label = policy.label('invalid_fuzzer')
if not invalid_fuzzer_label:
return
if not issue or not testcase.bug_information:
return
# If the issue is closed, there's no work to do.
if not issue.is_open:
return
# Currently, this only happens if a test case relies on a fuzzer that has
# been deleted. This can be modified if more cases are needed in the future.
if not testcase.get_metadata('fuzzer_was_deleted'):
return
# Check if we added this message once. If yes, bail out.
if issue_tracker_utils.was_label_added(issue, invalid_fuzzer_label):
return
issue_comment = (
'ClusterFuzz testcase %d is associated with an obsolete fuzzer and can '
'no longer be processed. Please close the issue if it is no longer '
'actionable.') % testcase.key.id()
issue.labels.add(invalid_fuzzer_label)
issue.save(new_comment=issue_comment, notify=True)
logs.log('Closed issue %d for invalid testcase %d.' % (issue.id,
testcase.key.id()))
def _send_email_to_uploader(testcase_id, to_email, content):
"""Send email to uploader when all the testcase tasks are finished."""
subject = 'Your testcase upload %d analysis is complete.' % testcase_id
content_with_footer = (
'%s\n\n'
'If you suspect that the result above is incorrect, '
'try re-doing that job on the testcase report page.') % content.strip()
html_content = content_with_footer.replace('\n', '<br>')
mail.send(to_email, subject, html_content)
def _get_severity_from_labels(security_severity_label, labels):
"""Get the severity from the label list."""
pattern = issue_filer.get_label_pattern(security_severity_label)
for label in labels:
match = pattern.match(label)
if match:
return severity_analyzer.string_to_severity(match.group(1))
return data_types.SecuritySeverity.MISSING
def _update_issue_security_severity_and_get_comment(policy, testcase, issue):
"""Apply a new security severity label if none exists on issue already
and return a comment on this addition. If a label already exists and does
not match security severity label on issue, then just return a comment on
what the recommended severity is."""
security_severity_label = policy.label('security_severity')
if not security_severity_label:
return ''
if not data_types.SecuritySeverity.is_valid(testcase.security_severity):
return ''
issue_severity = _get_severity_from_labels(security_severity_label,
issue.labels)
recommended_severity = issue_filer.apply_substitutions(
policy, security_severity_label, testcase)
if not recommended_severity:
return ''
recommended_severity = recommended_severity[0]
if issue_severity == data_types.SecuritySeverity.MISSING:
issue.labels.add(recommended_severity)
return ('\n\nA recommended severity was added to this bug. '
'Please change the severity if it is inaccurate.')
if issue_severity != testcase.security_severity:
return (
'\n\nThe recommended severity (%s) is different from what was assigned '
'to the bug. Please double check the accuracy of the assigned '
'severity.' % recommended_severity)
return ''
def _update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, description, update_bug_summary, notify):
"""Add issue comment when uploaded testcase is processed."""
if update_bug_summary and testcase.is_crash():
issue.title = data_handler.get_issue_summary(testcase)
# Impact labels like impacting head/beta/stable only apply for Chromium.
if testcase.project_name == 'chromium':
issue_filer.update_issue_impact_labels(testcase, issue)
# Add severity labels for all project types.
comment = description + _update_issue_security_severity_and_get_comment(
policy, testcase, issue)
issue.save(new_comment=comment, notify=notify)
def notify_uploader_when_testcase_is_processed(policy, testcase, issue):
"""Notify uploader by email when all the testcase tasks are finished."""
testcase_id = testcase.key.id()
# Check if this is a user upload. If not, bail out.
upload_metadata = data_types.TestcaseUploadMetadata.query(
data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get()
if not upload_metadata:
return
# Check that we have a valid email to send the notification. If not, bail out.
to_email = upload_metadata.uploader_email
if not to_email:
return
# If this is a bundled archive with multiple testcases, then don't send email
# for individual testcases.
if upload_metadata.bundled:
return
# Check if the notification is already sent once. If yes, bail out.
if data_handler.is_notification_sent(testcase_id, to_email):
return
# Make sure all testcase tasks are done (e.g. minimization, regression, etc.).
if not data_handler.critical_tasks_completed(testcase):
return
notify = not upload_metadata.quiet_flag
if issue and not testcase.duplicate_of:
issue_description = data_handler.get_issue_description(testcase)
_update_issue_when_uploaded_testcase_is_processed(
policy, testcase, issue, issue_description,
upload_metadata.bug_summary_update_flag, notify)
if notify:
issue_description_without_crash_state = data_handler.get_issue_description(
testcase, hide_crash_state=True)
_send_email_to_uploader(testcase_id, to_email,
issue_description_without_crash_state)
# Make sure to create notification entry, as we use this to update bug.
data_handler.create_notification_entry(testcase_id, to_email)
def update_os_labels(policy, testcase, issue):
"""Add OS labels to issue."""
os_label = policy.label('os')
if not os_label:
return
if not issue:
return
platforms = get_crash_occurrence_platforms(testcase)
platforms = platforms.union(get_platforms_from_testcase_variants(testcase))
logs.log(
'Found %d platforms for the testcase %d.' % (len(platforms),
testcase.key.id()),
platforms=platforms)
for platform in platforms:
label = os_label.replace('%PLATFORM%', platform.capitalize())
if not issue_tracker_utils.was_label_added(issue, label):
issue.labels.add(label)
issue.save(notify=False)
logs.log('Updated labels of issue %d.' % issue.id, labels=issue.labels)
def update_fuzz_blocker_label(policy, testcase, issue,
top_crashes_by_project_and_platform_map):
"""Add top crash label to issue."""
fuzz_blocker_label = policy.label('fuzz_blocker')
if not fuzz_blocker_label:
return
if not issue:
return
if not testcase.open:
return
top_crash_platforms = get_top_crash_platforms(
testcase, top_crashes_by_project_and_platform_map)
if not top_crash_platforms:
# Not a top crasher, bail out.
return
if issue_tracker_utils.was_label_added(issue, fuzz_blocker_label):
# Issue was already marked a top crasher, bail out.
return
if len(top_crash_platforms) == 1:
platform_message = '%s platform' % top_crash_platforms[0]
else:
platform_message = '%s and %s platforms' % (', '.join(
top_crash_platforms[:-1]), top_crash_platforms[-1])
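# Illustration with hypothetical values: ['linux'] yields 'linux platform' above,
# while ['linux', 'mac', 'windows'] yields 'linux, mac and windows platforms'.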
fuzzer_name = (
testcase.get_metadata('fuzzer_binary_name') or testcase.fuzzer_name)
update_message = (
'This crash occurs very frequently on %s and is likely preventing the '
'fuzzer %s from making much progress. Fixing this will allow more bugs '
'to be found.' % (platform_message, fuzzer_name))
if utils.is_oss_fuzz():
update_message += OSS_FUZZ_INCORRECT_COMMENT
elif utils.is_chromium():
update_message += '\n\nMarking this bug as a blocker for next Beta release.'
update_message = _append_generic_incorrect_comment(
update_message,
policy,
issue,
' and remove the {label_text}.'.format(
label_text=issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)))
issue.labels.add(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL)
# Update with the next beta for trunk, and remove existing milestone label.
beta_milestone_label = (
'M-%d' % build_info.get_release_milestone('head', testcase.platform))
if beta_milestone_label not in issue.labels:
issue.labels.remove_by_prefix('M-')
issue.labels.add(beta_milestone_label)
logs.log(update_message)
issue.labels.add(fuzz_blocker_label)
issue.save(new_comment=update_message, notify=True)
def update_component_labels(testcase, issue):
"""Add components to the issue if needed."""
if not issue:
return
components = _get_predator_result_item(
testcase, 'suspected_components', default=[])
# Remove components already in issue or whose more specific variants exist.
filtered_components = []
for component in components:
found_component_in_issue = any(
component == issue_component or issue_component.startswith(component +
'>')
for issue_component in issue.components)
if not found_component_in_issue:
filtered_components.append(component)
if not filtered_components:
# If there are no new components to add, then we shouldn't make any changes
# to issue.
return
# Don't run on issues we've already applied automatic components to in case
# labels are removed manually. This may cause issues in the event that we
# rerun a test case, but it seems like a reasonable tradeoff to avoid spam.
if issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL):
return
for filtered_component in filtered_components:
issue.components.add(filtered_component)
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)
issue_comment = (
'Automatically applying components based on crash stacktrace and '
'information from OWNERS files.\n\n'
'If this is incorrect, please apply the {label_text}.'.format(
label_text=issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_COMPONENTS_LABEL)))
issue.save(new_comment=issue_comment, notify=True)
def update_issue_ccs_from_owners_file(policy, testcase, issue):
"""Add cc to an issue based on owners list from owners file. This is
currently applicable to fuzz targets only."""
auto_cc_label = policy.label('auto_cc_from_owners')
if not auto_cc_label:
return
if not issue or not issue.is_open:
return
if testcase.get_metadata('has_issue_ccs_from_owners_file'):
return
ccs_list = utils.parse_delimited(
testcase.get_metadata('issue_owners', ''),
delimiter=',',
strip=True,
remove_empty=True)
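# For illustration (hypothetical value): an 'issue_owners' metadata string of
# 'dev1@example.com, dev2@example.com' becomes ['dev1@example.com', 'dev2@example.com'].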
if not ccs_list:
return
# If we've assigned the ccs before, it likely means we were incorrect.
# Don't try again for this particular issue.
if issue_tracker_utils.was_label_added(issue, auto_cc_label):
return
ccs_added = False
actions = list(issue.actions)
for cc in random.sample(ccs_list, min(AUTO_CC_LIMIT, len(ccs_list))):
if cc in issue.ccs:
continue
# If cc was previously manually removed from the cc list, we assume that
# they were incorrectly added. Don't try to add them again.
cc_was_removed = any(cc in action.ccs.removed for action in actions)
if cc_was_removed:
continue
issue.ccs.add(cc)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_owners_file', True)
return
issue_comment = (
'Automatically adding ccs based on OWNERS file / target commit history.')
if utils.is_oss_fuzz():
issue_comment += OSS_FUZZ_INCORRECT_COMMENT + '.'
else:
issue_comment = _append_generic_incorrect_comment(issue_comment, policy,
issue, '.')
issue.labels.add(auto_cc_label)
issue.save(new_comment=issue_comment, notify=True)
def update_issue_labels_for_flaky_testcase(policy, testcase, issue):
"""Update issue reproducibility label when testcase becomes flaky or
unreproducible."""
if not issue or not issue.is_open:
return
# If the testcase is reproducible, then no change is needed. Bail out.
if not testcase.one_time_crasher_flag:
return
# Make sure that no other reproducible testcases associated with this issue
# are open. If yes, no need to update label.
similar_reproducible_testcase = data_types.Testcase.query(
data_types.Testcase.bug_information == testcase.bug_information,
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if similar_reproducible_testcase:
return
reproducible_label = policy.label('reproducible')
unreproducible_label = policy.label('unreproducible')
if not reproducible_label or not unreproducible_label:
return
# Make sure that this issue is not already marked Unreproducible.
if unreproducible_label in issue.labels:
return
issue.labels.remove(reproducible_label)
issue.labels.add(unreproducible_label)
comment = ('ClusterFuzz testcase {testcase_id} appears to be flaky, '
'updating reproducibility {label_type}.'.format(
testcase_id=testcase.key.id(),
label_type=issue.issue_tracker.label_type))
issue.save(new_comment=comment)
def update_issue_owner_and_ccs_from_predator_results(policy,
testcase,
issue,
only_allow_ccs=False):
"""Assign the issue to an appropriate owner if possible."""
if not issue or not issue.is_open:
return
# If the issue already has an owner, we don't need to update the bug.
if issue.assignee:
return
# If there are more than 3 suspected CLs, we can't be confident in the
# results. Just skip any sort of notification to CL authors in this case.
suspected_cls = _get_predator_result_item(testcase, 'suspected_cls')
if not suspected_cls or len(suspected_cls) > 3:
return
# If we've assigned an owner or cc once before, it likely means we were
# incorrect. Don't try again for this particular issue.
if (issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL) or
issue_tracker_utils.was_label_added(
issue, data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)):
return
# Validate that the suspected CLs have all of the information we need before
# continuing. This allows us to assume that they are well-formed later,
# avoiding any potential exceptions that would interrupt this task.
for suspected_cl in suspected_cls:
url = suspected_cl.get('url')
description = suspected_cl.get('description')
author = suspected_cl.get('author')
if not url or not description or not author:
logs.log_error(
'Suspected CL for testcase %d is missing required information.' %
testcase.key.id())
return
if len(suspected_cls) == 1 and not only_allow_ccs:
suspected_cl = suspected_cls[0]
# If this owner has already been assigned before but has since been removed,
# don't assign it to them again.
for action in issue.actions:
if action.assignee == suspected_cls[0]['author']:
return
# We have high confidence for the single-CL case, so we assign the owner.
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL)
issue.assignee = suspected_cl['author']
issue.status = policy.status('assigned')
issue_comment = (
'Automatically assigning owner based on suspected regression '
'changelist %s (%s).\n\n'
'If this is incorrect, please let us know why and apply the %s '
'label. If you aren\'t the correct owner for this issue, please '
'unassign yourself as soon as possible so it can be re-triaged.' %
(suspected_cl['url'], suspected_cl['description'],
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL))
else:
if testcase.get_metadata('has_issue_ccs_from_predator_results'):
return
issue_comment = (
'Automatically adding ccs based on suspected regression changelists:'
'\n\n')
ccs_added = False
for suspected_cl in suspected_cls:
# Update the comment with the suspected CL, regardless of whether or not
# we're ccing the author. This might, for example, catch the attention of
# someone who has already been cced.
author = suspected_cl['author']
issue_comment += '%s by %s - %s\n\n' % (suspected_cl['description'],
author, suspected_cl['url'])
if author in issue.ccs:
continue
# If an author has previously been manually removed from the cc list,
# we assume they were incorrectly added. Don't try to add them again.
author_was_removed = False
for action in issue.actions:
if author in action.ccs.removed:
author_was_removed = True
break
if author_was_removed:
continue
issue.ccs.add(author)
ccs_added = True
if not ccs_added:
# Everyone we'd expect to see has already been cced on the issue. No need
# to spam it with another comment. Also, set the metadata to avoid doing
# this again.
testcase.set_metadata('has_issue_ccs_from_predator_results', True)
return
issue.labels.add(data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)
issue_comment += ((
'If this is incorrect, please let us know why and apply the '
'{label_text}.').format(
label_text=issue.issue_tracker.label_text(
data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL)))
try:
issue.save(new_comment=issue_comment, notify=True)
except HttpError:
# If we see such an error when we aren't setting an owner, it's unexpected.
if only_allow_ccs or not issue.assignee:
logs.log_error(
'Unable to update issue for test case %d.' % testcase.key.id())
return
# Retry without setting the owner. They may not be a chromium project
# member, in which case we can try falling back to cc.
issue = issue_tracker_utils.get_issue_for_testcase(testcase)
update_issue_owner_and_ccs_from_predator_results(
policy, testcase, issue, only_allow_ccs=True)
def cleanup_unused_heartbeats():
"""Clean up unused heartbeat entities."""
cutoff_time = utils.utcnow() - datetime.timedelta(
days=UNUSED_HEARTBEAT_THRESHOLD)
unused_heartbeats = ndb_utils.get_all_from_query(
data_types.Heartbeat.query(
data_types.Heartbeat.last_beat_time < cutoff_time),
keys_only=True)
ndb_utils.delete_multi(unused_heartbeats)
class Handler(base_handler.Handler):
"""Cleanup."""
@handler.check_cron()
def get(self):
cleanup_testcases_and_issues()
cleanup_reports_metadata()
leak_blacklist.cleanup_global_blacklist()
cleanup_unused_fuzz_targets_and_jobs()
cleanup_unused_heartbeats()
|
py | 1a491f9ff5715ee868d5825c44526618d9c677d0 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from oneflow.compatible.single_client.core.summary import projector_pb2 as projector_pb2
from oneflow.compatible.single_client.python.oneflow_export import oneflow_export
import time
from oneflow.compatible import single_client as flow
@oneflow_export("summary.Projector")
class Projector(object):
r"""The class of Projector
This class can create an 'embedding_projector' or 'exception_projector'
"""
def __init__(self, logdir=None):
r"""Create a Projector objector
Args:
logdir: The log dir
Raises:
Exception: If 'logdir' is None or illegal
"""
if logdir is None:
raise Exception("logdir should not be None!")
logdir += "/projector"
if not os.path.exists(logdir):
os.makedirs(logdir)
self.logdir_ = logdir
self.embedding_filename_ = None
self.exception_filename_ = None
def create_embedding_projector(self):
if (self.embedding_filename_ is not None) and (
os.path.exists(self.embedding_filename_)
):
raise OSError("You must create only one embedding projector!")
self.embedding_filename_ = (
self.logdir_ + "/projector." + str(int(time.time())) + ".log"
)
def create_exception_projector(self):
if (self.exception_filename_ is not None) and (
os.path.exists(self.exception_filename_)
):
raise OSError("You must create only one embedding projector!")
self.exception_filename_ = (
self.logdir_ + "/projector.gradit." + str(int(time.time())) + ".log"
)
@property
def logdir(self):
return self.logdir_
@property
def exception_filename(self):
return self.exception_filename_
@property
def embedding_filename(self):
return self.embedding_filename_
def write_projector(self, filename=None, projector=None):
with open(filename, "wb") as f:
f.write(projector.SerializeToString())
f.flush()
def set_tensor(self, tensor: projector_pb2.Tensor, value):
for d in value.shape:
td = tensor.shape.dim.add()
td.size = d
tensor.dtype = str(value.dtype)
tensor.content = value.tobytes()
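# For example, a float32 numpy array of shape (3, 4) is stored with dims [3, 4],
# dtype string 'float32' and its raw buffer from tobytes().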
def set_projector(self, pro, tag, step, value, label=None):
pro.tag = str(tag)
pro.step = step
pro.WALL_TIME = time.time()
self.set_tensor(pro.value, value)
if label is not None:
self.set_tensor(pro.label, label)
def set_sample(self, sample, name, x, sample_type):
if name is not None:
sample.name = name
if sample_type == "image" or sample_type == "IMAGE":
sample.type = projector_pb2.Sample.SampleType.IMAGE
elif sample_type == "audio" or sample_type == "AUDIO":
sample.type = projector_pb2.Sample.SampleType.AUDIO
elif sample_type == "text" or sample_type == "TEXT":
sample.type = projector_pb2.Sample.SampleType.TEXT
else:
raise NotImplementedError
if x is not None:
self.set_tensor(sample.X, x)
def embedding_projector(
self,
value=None,
label=None,
tag=None,
step=None,
sample_name=None,
sample_type=None,
x=None,
):
if tag is None:
tag = "embedding_projector"
summary_projector = projector_pb2.SummaryProjector()
summary_projector.metadata.type = projector_pb2.MetaData.ProjectorType.EMBEDDING
projector = summary_projector.projector.add()
self.set_projector(pro=projector, tag=tag, step=step, value=value, label=label)
if (sample_name is not None) and (sample_type is not None):
self.set_sample(
sample=summary_projector.sample,
name=sample_name,
x=x,
sample_type=sample_type,
)
self.write_projector(self.embedding_filename_, summary_projector)
def exception_projector(
self,
value=None,
tag=None,
step=None,
sample_name=None,
sample_type=None,
x=None,
):
if tag is None:
tag = "exception_projector"
summary_projector = projector_pb2.SummaryProjector()
summary_projector.metadata.type = projector_pb2.MetaData.ProjectorType.EXCEPTION
projector = summary_projector.projector.add()
self.set_projector(pro=projector, tag=tag, step=step, value=value)
if (sample_name is not None) and (sample_type is not None):
self.set_sample(
sample=summary_projector.sample,
name=sample_name,
x=x,
sample_type=sample_type,
)
self.write_projector(self.exception_filename_, summary_projector)
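# Illustrative sketch, not part of the original module: a minimal example of how the
# Projector above could be driven. The log dir and data below are hypothetical, and
# numpy is assumed to be available alongside oneflow.
def _example_write_embedding(logdir="/tmp/oneflow_projector_demo"):
    """Sketch: write a single embedding projector record with fake data."""
    import numpy as np  # assumed dependency, used only for this sketch
    demo = Projector(logdir=logdir)
    demo.create_embedding_projector()
    # 16 embedding vectors of dimension 8, plus an integer label per vector.
    embeddings = np.random.rand(16, 8).astype(np.float32)
    labels = np.arange(16, dtype=np.int32)
    demo.embedding_projector(value=embeddings, label=labels, step=0)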
|
py | 1a49200fff9ea30acd78d97070cf165a6d7953c8 | """Binary classes"""
import binascii
import gzip
import io
import json
import logging
import os
import shutil
import struct
import subprocess
import sys
import tarfile
import tempfile
import zipfile
from json import dumps
from typing import Optional
import h5py
import numpy as np
import pysam
import pysam.bcftools
from bx.seq.twobit import TWOBIT_MAGIC_NUMBER, TWOBIT_MAGIC_NUMBER_SWAP
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.data import (
DatatypeValidation,
get_file_peek,
)
from galaxy.datatypes.metadata import (
DictParameter,
FileParameter,
ListParameter,
MetadataElement,
MetadataParameter,
)
from galaxy.datatypes.sniff import build_sniff_from_prefix
from galaxy.util import nice_size, sqlite
from galaxy.util.checkers import is_bz2, is_gzip
from . import data, dataproviders
log = logging.getLogger(__name__)
# pysam 0.16.0.1 emits logs containing the word 'Error', which can confuse the stdout/stderr checkers.
# Can be removed once https://github.com/pysam-developers/pysam/issues/939 is resolved.
pysam.set_verbosity(0)
# Currently these supported binary data types must be manually set on upload
class Binary(data.Data):
"""Binary data"""
edam_format = "format_2333"
file_ext = "binary"
@staticmethod
def register_sniffable_binary_format(data_type, ext, type_class):
"""Deprecated method."""
@staticmethod
def register_unsniffable_binary_ext(ext):
"""Deprecated method."""
def set_peek(self, dataset, **kwd):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = 'binary data'
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'application/octet-stream'
class Ab1(Binary):
"""Class describing an ab1 binary sequence file"""
file_ext = "ab1"
edam_format = "format_3000"
edam_data = "data_0924"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary ab1 sequence file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary ab1 sequence file ({nice_size(dataset.get_size())})"
class Idat(Binary):
"""Binary data in idat format"""
file_ext = "idat"
edam_format = "format_2058"
edam_data = "data_2603"
def sniff(self, filename):
try:
header = open(filename, 'rb').read(4)
if header == b'IDAT':
return True
return False
except Exception:
return False
class Cel(Binary):
""" Cel File format described at:
http://media.affymetrix.com/support/developer/powertools/changelog/gcos-agcc/cel.html
"""
file_ext = "cel"
edam_format = "format_1638"
edam_data = "data_3110"
MetadataElement(name="version", default="3", desc="Version", readonly=True, visible=True,
optional=True, no_value="3")
def sniff(self, filename):
"""
Try to guess if the file is a Cel file.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('affy_v_agcc.cel')
>>> Cel().sniff(fname)
True
>>> fname = get_test_fname('affy_v_3.cel')
>>> Cel().sniff(fname)
True
>>> fname = get_test_fname('affy_v_4.cel')
>>> Cel().sniff(fname)
True
>>> fname = get_test_fname('test.gal')
>>> Cel().sniff(fname)
False
"""
with open(filename, 'rb') as handle:
header_bytes = handle.read(8)
found_cel_4 = False
found_cel_3 = False
found_cel_agcc = False
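# The checks below distinguish three CEL flavours: v4 (GCOS) files start with the
# little-endian integers (64, 4), AGCC/Calvin files start with the bytes (59, 1),
# and text-based v3 files begin with '[CEL]'.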
if struct.unpack("<ii", header_bytes[:9]) == (64, 4):
found_cel_4 = True
elif struct.unpack(">bb", header_bytes[:2]) == (59, 1):
found_cel_agcc = True
elif header_bytes.decode("utf8", errors="ignore").startswith('[CEL]'):
found_cel_3 = True
return found_cel_3 or found_cel_4 or found_cel_agcc
def set_meta(self, dataset, **kwd):
"""
Set metadata for Cel file.
"""
with open(dataset.file_name, 'rb') as handle:
header_bytes = handle.read(8)
if struct.unpack("<ii", header_bytes[:9]) == (64, 4):
dataset.metadata.version = "4"
elif struct.unpack(">bb", header_bytes[:2]) == (59, 1):
dataset.metadata.version = "agcc"
elif header_bytes.decode("utf8", errors="ignore").startswith('[CEL]'):
dataset.metadata.version = "3"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.blurb = f"Cel version: {dataset.metadata.version}"
dataset.peek = get_file_peek(dataset.file_name)
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
class MashSketch(Binary):
"""
Mash Sketch file.
Sketches are used by the MinHash algorithm to allow fast distance estimations
with low storage and memory requirements. To make a sketch, each k-mer in a sequence
is hashed, which creates a pseudo-random identifier. By sorting these identifiers (hashes),
a small subset from the top of the sorted list can represent the entire sequence (these are min-hashes).
The more similar another sequence is, the more min-hashes it is likely to share.
"""
file_ext = "msh"
class CompressedArchive(Binary):
"""
Class describing a compressed binary file
This class can be subclassed to implement archive filetypes that will not be unpacked by upload.py.
"""
file_ext = "compressed_archive"
compressed = True
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Compressed binary file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Compressed binary file ({nice_size(dataset.get_size())})"
class Meryldb(CompressedArchive):
"""MerylDB is a tar.gz archive, with 128 files. 64 data files and 64 index files."""
file_ext = "meryldb"
def sniff(self, filename):
"""
Try to guess if the file is a MerylDB archive.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('affy_v_agcc.cel')
>>> Meryldb().sniff(fname)
False
>>> fname = get_test_fname('read-db.meryldb')
>>> Meryldb().sniff(fname)
True
"""
try:
if filename and tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as temptar:
_tar_content = temptar.getnames()
# 64 data files and 64 indices + 2 folders
if len(_tar_content) == 130:
if len([_ for _ in _tar_content if _.endswith('.merylIndex')]) == 64:
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
class Bref3(Binary):
"""Bref3 format is a binary format for storing phased, non-missing genotypes for a list of samples."""
file_ext = "bref3"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("7a8874f400156272")
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(self._magic)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary bref3 file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary bref3 file ({nice_size(dataset.get_size())})"
class DynamicCompressedArchive(CompressedArchive):
def matches_any(self, target_datatypes):
"""Treat two aspects of compressed datatypes separately.
"""
compressed_target_datatypes = []
uncompressed_target_datatypes = []
for target_datatype in target_datatypes:
if hasattr(target_datatype, "uncompressed_datatype_instance") and target_datatype.compressed_format == self.compressed_format:
uncompressed_target_datatypes.append(target_datatype.uncompressed_datatype_instance)
else:
compressed_target_datatypes.append(target_datatype)
# TODO: Add gz and bz2 as proper datatypes and use those instances instead of
# CompressedArchive() in the following check.
return self.uncompressed_datatype_instance.matches_any(uncompressed_target_datatypes) or \
CompressedArchive().matches_any(compressed_target_datatypes)
class GzDynamicCompressedArchive(DynamicCompressedArchive):
compressed_format = "gzip"
class Bz2DynamicCompressedArchive(DynamicCompressedArchive):
compressed_format = "bz2"
class CompressedZipArchive(CompressedArchive):
"""
Class describing a compressed binary file
This class can be subclassed to implement archive filetypes that will not be unpacked by upload.py.
"""
file_ext = "zip"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Compressed zip file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Compressed zip file ({nice_size(dataset.get_size())})"
def sniff(self, filename):
with zipfile.ZipFile(filename) as zf:
zf_files = zf.infolist()
count = 0
for f in zf_files:
if f.file_size > 0 and not f.filename.startswith('__MACOSX/') and not f.filename.endswith('.DS_Store'):
count += 1
if count > 1:
return True
class GenericAsn1Binary(Binary):
"""Class for generic ASN.1 binary format"""
file_ext = "asn1-binary"
edam_format = "format_1966"
edam_data = "data_0849"
class _BamOrSam:
"""
Helper class to set the metadata common to sam and bam files
"""
def set_meta(self, dataset, overwrite=True, **kwd):
try:
bam_file = pysam.AlignmentFile(dataset.file_name, mode='rb')
# TODO: Reference names, lengths, read_groups and headers can become very large, truncate when necessary
dataset.metadata.reference_names = list(bam_file.references)
dataset.metadata.reference_lengths = list(bam_file.lengths)
dataset.metadata.bam_header = dict(bam_file.header.items())
dataset.metadata.read_groups = [read_group['ID'] for read_group in dataset.metadata.bam_header.get('RG', []) if 'ID' in read_group]
dataset.metadata.sort_order = dataset.metadata.bam_header.get('HD', {}).get('SO', None)
dataset.metadata.bam_version = dataset.metadata.bam_header.get('HD', {}).get('VN', None)
except Exception:
# Per Dan, don't log here because doing so will cause datasets that
# fail metadata to end in the error state
pass
class BamNative(CompressedArchive, _BamOrSam):
"""Class describing a BAM binary file that is not necessarily sorted"""
edam_format = "format_2572"
edam_data = "data_0863"
file_ext = "unsorted.bam"
sort_flag: Optional[str] = None
MetadataElement(name="columns", default=12, desc="Number of columns", readonly=True, visible=False, no_value=0)
MetadataElement(name="column_types", default=['str', 'int', 'str', 'int', 'int', 'str', 'str', 'int', 'int', 'str', 'str', 'str'], desc="Column types", param=metadata.ColumnTypesParameter, readonly=True, visible=False, no_value=[])
MetadataElement(name="column_names", default=['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'MRNM', 'MPOS', 'ISIZE', 'SEQ', 'QUAL', 'OPT'], desc="Column names", readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="bam_version", default=None, desc="BAM Version", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=None)
MetadataElement(name="sort_order", default=None, desc="Sort Order", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=None)
MetadataElement(name="read_groups", default=[], desc="Read Groups", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="reference_names", default=[], desc="Chromosome Names", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="reference_lengths", default=[], desc="Chromosome Lengths", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value=[])
MetadataElement(name="bam_header", default={}, desc="Dictionary of BAM Headers", param=MetadataParameter, readonly=True, visible=False, optional=True, no_value={})
def set_meta(self, dataset, overwrite=True, **kwd):
_BamOrSam().set_meta(dataset)
@staticmethod
def merge(split_files, output_file):
"""
Merges BAM files
:param split_files: List of bam file paths to merge
:param output_file: Write merged bam file to this location
"""
pysam.merge('-O', 'BAM', output_file, *split_files)
def init_meta(self, dataset, copy_from=None):
Binary.init_meta(self, dataset, copy_from=copy_from)
def sniff(self, filename):
return BamNative.is_bam(filename)
@classmethod
def is_bam(cls, filename):
# BAM is compressed in the BGZF format, and must not be uncompressed in Galaxy.
# The first 4 bytes of any bam file is 'BAM\1', and the file is binary.
try:
header = gzip.open(filename).read(4)
if header == b'BAM\1':
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary bam alignments file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary bam alignments file ({nice_size(dataset.get_size())})"
def to_archive(self, dataset, name=""):
rel_paths = []
file_paths = []
rel_paths.append(f"{name or dataset.file_name}.{dataset.extension}")
file_paths.append(dataset.file_name)
rel_paths.append(f"{name or dataset.file_name}.{dataset.extension}.bai")
file_paths.append(dataset.metadata.bam_index.file_name)
return zip(file_paths, rel_paths)
def groom_dataset_content(self, file_name):
"""
Ensures that the BAM file contents are coordinate-sorted. This function is called
on an output dataset after the content is initially generated.
"""
# Use pysam to sort the BAM file
# This command may also create temporary files <out.prefix>.%d.bam when the
# whole alignment cannot fit into memory.
# do this in a unique temp directory, because of possible <out.prefix>.%d.bam temp files
if not self.dataset_content_needs_grooming(file_name):
# Don't re-sort if already sorted
return
tmp_dir = tempfile.mkdtemp()
tmp_sorted_dataset_file_name_prefix = os.path.join(tmp_dir, 'sorted')
sorted_file_name = f"{tmp_sorted_dataset_file_name_prefix}.bam"
slots = os.environ.get('GALAXY_SLOTS', 1)
sort_args = []
if self.sort_flag:
sort_args = [self.sort_flag]
sort_args.extend([f"-@{slots}", file_name, '-T', tmp_sorted_dataset_file_name_prefix, '-O', 'BAM', '-o', sorted_file_name])
try:
pysam.sort(*sort_args)
except Exception:
shutil.rmtree(tmp_dir, ignore_errors=True)
raise
# Move the sorted BAM file to our output dataset location
shutil.move(sorted_file_name, file_name)
# Remove temp file and empty temporary directory
os.rmdir(tmp_dir)
def get_chunk(self, trans, dataset, offset=0, ck_size=None):
if not offset == -1:
try:
with pysam.AlignmentFile(dataset.file_name, "rb") as bamfile:
ck_size = 300 # 300 lines
ck_data = ""
header_line_count = 0
if offset == 0:
ck_data = bamfile.text.replace('\t', ' ')
header_line_count = bamfile.text.count('\n')
else:
bamfile.seek(offset)
for line_number, alignment in enumerate(bamfile):
# Stop once the combined number of header and alignment lines exceeds 'ck_size'.
# FIXME: Can be problematic if the BAM has millions of header lines.
offset = bamfile.tell()
if (line_number + header_line_count) > ck_size:
break
else:
bamline = alignment.tostring(bamfile)
# Galaxy would display each tag as a separate column because 'tostring()' puts tabs between the tags.
# The code below rejoins the optional tag fields into a single space-separated column.
bamline_modified = ('\t').join(bamline.split()[:11] + [(' ').join(bamline.split()[11:])])
ck_data = f"{ck_data}\n{bamline_modified}"
else:
# Nothing to enumerate; we've either offset to the end
# of the bamfile, or there is no data. (possible with
# header-only bams)
offset = -1
except Exception as e:
offset = -1
ck_data = f"Could not display BAM file, error was:\n{e}"
else:
ck_data = ''
offset = -1
return dumps({'ck_data': util.unicodify(ck_data),
'offset': offset})
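# The client passes the returned 'offset' back to request the next chunk;
# an offset of -1 marks the end of the data (or an error).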
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd):
preview = util.string_as_bool(preview)
if offset is not None:
return self.get_chunk(trans, dataset, offset, ck_size)
elif to_ext or not preview:
return super().display_data(trans, dataset, preview, filename, to_ext, **kwd)
else:
column_names = dataset.metadata.column_names
if not column_names:
column_names = []
column_types = dataset.metadata.column_types
if not column_types:
column_types = []
column_number = dataset.metadata.columns
if column_number is None:
column_number = 1
return trans.fill_template("/dataset/tabular_chunked.mako",
dataset=dataset,
chunk=self.get_chunk(trans, dataset, 0),
column_number=column_number,
column_names=column_names,
column_types=column_types)
def validate(self, dataset, **kwd):
if not BamNative.is_bam(dataset.file_name):
return DatatypeValidation.invalid("This dataset does not appear to a BAM file.")
elif self.dataset_content_needs_grooming(dataset.file_name):
return DatatypeValidation.invalid("This BAM file does not appear to have the correct sorting for declared datatype.")
return DatatypeValidation.validated()
@dataproviders.decorators.has_dataproviders
class Bam(BamNative):
"""Class describing a BAM binary file"""
edam_format = "format_2572"
edam_data = "data_0863"
file_ext = "bam"
track_type = "ReadTrack"
data_sources = {"data": "bai", "index": "bigwig"}
MetadataElement(name="bam_index", desc="BAM Index File", param=metadata.FileParameter, file_ext="bai", readonly=True, no_value=None, visible=False, optional=True)
MetadataElement(name="bam_csi_index", desc="BAM CSI Index File", param=metadata.FileParameter, file_ext="bam.csi", readonly=True, no_value=None, visible=False, optional=True)
def get_index_flag(self, file_name):
"""
Return pysam flag for bai index (default) or csi index (contig size > (2**29 - 1) )
"""
index_flag = '-b' # bai index
try:
with pysam.AlignmentFile(file_name) as alignment_file:
if max(alignment_file.header.lengths) > (2 ** 29) - 1:
index_flag = '-c' # csi index
except Exception:
# File may not have a header, that's OK
pass
return index_flag
def dataset_content_needs_grooming(self, file_name):
"""
Check if file_name is a coordinate-sorted BAM file
"""
# The best way to ensure that BAM files are coordinate-sorted and indexable
# is to actually index them.
index_flag = self.get_index_flag(file_name)
index_name = tempfile.NamedTemporaryFile(prefix="bam_index").name
try:
# If pysam fails to index a file it will write to stderr,
# and this causes the set_meta script to fail. So instead
# we start another process and discard stderr.
if index_flag == '-b':
# IOError: No such file or directory: '-b' if index_flag is set to -b (pysam 0.15.4)
cmd = ['python', '-c', f"import pysam; pysam.set_verbosity(0); pysam.index('{file_name}', '{index_name}')"]
else:
cmd = ['python', '-c', f"import pysam; pysam.set_verbosity(0); pysam.index('{index_flag}', '{file_name}', '{index_name}')"]
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stderr=devnull, shell=False)
needs_sorting = False
except subprocess.CalledProcessError:
needs_sorting = True
try:
os.unlink(index_name)
except Exception:
pass
return needs_sorting
def set_meta(self, dataset, overwrite=True, **kwd):
# These metadata values are not accessible by users, always overwrite
super().set_meta(dataset=dataset, overwrite=overwrite, **kwd)
index_flag = self.get_index_flag(dataset.file_name)
if index_flag == '-b':
spec_key = 'bam_index'
index_file = dataset.metadata.bam_index
else:
spec_key = 'bam_csi_index'
index_file = dataset.metadata.bam_csi_index
if not index_file:
index_file = dataset.metadata.spec[spec_key].param.new_file(dataset=dataset)
if index_flag == '-b':
# IOError: No such file or directory: '-b' if index_flag is set to -b (pysam 0.15.4)
pysam.index(dataset.file_name, index_file.file_name)
else:
pysam.index(index_flag, dataset.file_name, index_file.file_name)
dataset.metadata.bam_index = index_file
def sniff(self, file_name):
return super().sniff(file_name) and not self.dataset_content_needs_grooming(file_name)
# ------------- Dataproviders
# pipe through samtools view
# ALSO: (as Sam)
# bam does not use '#' to indicate comments/headers - we need to strip out those headers from the std. providers
# TODO:?? seems like there should be an easier way to do/inherit this - metadata.comment_char?
# TODO: incorporate samtools options to control output: regions first, then flags, etc.
@dataproviders.decorators.dataprovider_factory('line', dataproviders.line.FilteredLineDataProvider.settings)
def line_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.line.FilteredLineDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('regex-line', dataproviders.line.RegexLineDataProvider.settings)
def regex_line_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.line.RegexLineDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('column', dataproviders.column.ColumnarDataProvider.settings)
def column_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.column.ColumnarDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('dict', dataproviders.column.DictDataProvider.settings)
def dict_dataprovider(self, dataset, **settings):
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
settings['comment_char'] = '@'
return dataproviders.column.DictDataProvider(samtools_source, **settings)
# these can't be used directly - may need BamColumn, BamDict (Bam metadata -> column/dict)
# OR - see genomic_region_dataprovider
# @dataproviders.decorators.dataprovider_factory('dataset-column', dataproviders.column.ColumnarDataProvider.settings)
# def dataset_column_dataprovider(self, dataset, **settings):
# settings['comment_char'] = '@'
# return super().dataset_column_dataprovider(dataset, **settings)
# @dataproviders.decorators.dataprovider_factory('dataset-dict', dataproviders.column.DictDataProvider.settings)
# def dataset_dict_dataprovider(self, dataset, **settings):
# settings['comment_char'] = '@'
# return super().dataset_dict_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('header', dataproviders.line.RegexLineDataProvider.settings)
def header_dataprovider(self, dataset, **settings):
# in this case we can use an option of samtools view to provide just what we need (w/o regex)
samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset, '-H')
return dataproviders.line.RegexLineDataProvider(samtools_source, **settings)
@dataproviders.decorators.dataprovider_factory('id-seq-qual', dataproviders.column.DictDataProvider.settings)
def id_seq_qual_dataprovider(self, dataset, **settings):
settings['indeces'] = [0, 9, 10]
settings['column_types'] = ['str', 'str', 'str']
settings['column_names'] = ['id', 'seq', 'qual']
return self.dict_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('genomic-region', dataproviders.column.ColumnarDataProvider.settings)
def genomic_region_dataprovider(self, dataset, **settings):
# GenomicRegionDataProvider currently requires a dataset as source - may not be necc.
# TODO:?? consider (at least) the possible use of a kwarg: metadata_source (def. to source.dataset),
# or remove altogether...
# samtools_source = dataproviders.dataset.SamtoolsDataProvider(dataset)
# return dataproviders.dataset.GenomicRegionDataProvider(samtools_source, metadata_source=dataset,
# 2, 3, 3, **settings)
# instead, set manually and use in-class column gen
settings['indeces'] = [2, 3, 3]
settings['column_types'] = ['str', 'int', 'int']
return self.column_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('genomic-region-dict', dataproviders.column.DictDataProvider.settings)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings['indeces'] = [2, 3, 3]
settings['column_types'] = ['str', 'int', 'int']
settings['column_names'] = ['chrom', 'start', 'end']
return self.dict_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('samtools')
def samtools_dataprovider(self, dataset, **settings):
"""Generic samtools interface - all options available through settings."""
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SamtoolsDataProvider(dataset_source, **settings)
class ProBam(Bam):
"""Class describing a BAM binary file - extended for proteomics data"""
edam_format = "format_3826"
edam_data = "data_0863"
file_ext = "probam"
class BamInputSorted(BamNative):
sort_flag = '-n'
file_ext = 'qname_input_sorted.bam'
"""
A class for BAM files that can formally be unsorted or queryname sorted.
Alignments are either kept in the order in which the queries appeared when the
alignment was produced, or ordered by their queryname.
This notably keeps alignments produced by paired-end sequencing adjacent.
"""
def sniff(self, file_name):
# We never want to sniff to this datatype
return False
def dataset_content_needs_grooming(self, file_name):
"""
Groom if the file is coordinate sorted
"""
# The best way to ensure that BAM files are coordinate-sorted and indexable
# is to actually index them.
with pysam.AlignmentFile(filename=file_name) as f:
# The only sure thing we know here is that the sort order can't be coordinate
return f.header.get('HD', {}).get('SO') == 'coordinate'
class BamQuerynameSorted(BamInputSorted):
"""A class for queryname sorted BAM files."""
sort_flag = '-n'
file_ext = "qname_sorted.bam"
def sniff(self, file_name):
return BamNative().sniff(file_name) and not self.dataset_content_needs_grooming(file_name)
def dataset_content_needs_grooming(self, file_name):
"""
Check if file_name is a queryname-sorted BAM file
"""
# The best way to ensure that BAM files are coordinate-sorted and indexable
# is to actually index them.
with pysam.AlignmentFile(filename=file_name) as f:
return f.header.get('HD', {}).get('SO') != 'queryname'
class CRAM(Binary):
file_ext = "cram"
edam_format = "format_3462"
edam_data = "data_0863"
MetadataElement(name="cram_version", default=None, desc="CRAM Version", param=MetadataParameter, readonly=True, visible=False, optional=False, no_value=None)
MetadataElement(name="cram_index", desc="CRAM Index File", param=metadata.FileParameter, file_ext="crai", readonly=True, no_value=None, visible=False, optional=True)
def set_meta(self, dataset, overwrite=True, **kwd):
major_version, minor_version = self.get_cram_version(dataset.file_name)
if major_version != -1:
dataset.metadata.cram_version = f"{str(major_version)}.{str(minor_version)}"
if not dataset.metadata.cram_index:
index_file = dataset.metadata.spec['cram_index'].param.new_file(dataset=dataset)
if self.set_index_file(dataset, index_file):
dataset.metadata.cram_index = index_file
def get_cram_version(self, filename):
try:
with open(filename, "rb") as fh:
header = bytearray(fh.read(6))
return header[4], header[5]
except Exception as exc:
log.warning('%s, get_cram_version Exception: %s', self, exc)
return -1, -1
def set_index_file(self, dataset, index_file):
try:
pysam.index(dataset.file_name, index_file.file_name)
return True
except Exception as exc:
log.warning('%s, set_index_file Exception: %s', self, exc)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = 'CRAM binary alignment file'
dataset.blurb = 'binary data'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff(self, filename):
try:
header = open(filename, 'rb').read(4)
if header == b"CRAM":
return True
return False
except Exception:
return False
class BaseBcf(CompressedArchive):
edam_format = "format_3020"
edam_data = "data_3498"
class Bcf(BaseBcf):
"""
Class describing a (BGZF-compressed) BCF file
"""
file_ext = "bcf"
MetadataElement(name="bcf_index", desc="BCF Index File", param=metadata.FileParameter, file_ext="csi", readonly=True, no_value=None, visible=False, optional=True)
def sniff(self, filename):
# BCF is compressed in the BGZF format, and must not be uncompressed in Galaxy.
try:
header = gzip.open(filename).read(3)
# The first 3 bytes of any BCF file are 'BCF', and the file is binary.
if header == b'BCF':
return True
return False
except Exception:
return False
def set_meta(self, dataset, overwrite=True, **kwd):
""" Creates the index for the BCF file. """
# These metadata values are not accessible by users, always overwrite
index_file = dataset.metadata.bcf_index
if not index_file:
index_file = dataset.metadata.spec['bcf_index'].param.new_file(dataset=dataset)
# Create the bcf index
dataset_symlink = os.path.join(os.path.dirname(index_file.file_name),
'__dataset_%d_%s' % (dataset.id, os.path.basename(index_file.file_name)))
os.symlink(dataset.file_name, dataset_symlink)
try:
cmd = ['python', '-c', f"import pysam.bcftools; pysam.bcftools.index('{dataset_symlink}')"]
subprocess.check_call(cmd)
shutil.move(f"{dataset_symlink}.csi", index_file.file_name)
except Exception as e:
raise Exception(f'Error setting BCF metadata: {util.unicodify(e)}')
finally:
# Remove temp file and symlink
os.remove(dataset_symlink)
dataset.metadata.bcf_index = index_file
class BcfUncompressed(BaseBcf):
"""
Class describing an uncompressed BCF file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('1.bcf_uncompressed')
>>> BcfUncompressed().sniff(fname)
True
>>> fname = get_test_fname('1.bcf')
>>> BcfUncompressed().sniff(fname)
False
"""
file_ext = "bcf_uncompressed"
def sniff(self, filename):
try:
header = open(filename, mode='rb').read(3)
# The first 3 bytes of any BCF file are 'BCF', and the file is binary.
if header == b'BCF':
return True
return False
except Exception:
return False
class H5(Binary):
"""
Class describing an HDF5 file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.mz5')
>>> H5().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> H5().sniff(fname)
False
"""
file_ext = "h5"
edam_format = "format_3590"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("894844460d0a1a0a")
def sniff(self, filename):
# The first 8 bytes of any hdf5 file are 0x894844460d0a1a0a
try:
header = open(filename, 'rb').read(8)
if header == self._magic:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary HDF5 file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary HDF5 file ({nice_size(dataset.get_size())})"
class Loom(H5):
"""
Class describing a Loom file: http://loompy.org/
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.loom')
>>> Loom().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Loom().sniff(fname)
False
"""
file_ext = "loom"
edam_format = "format_3590"
MetadataElement(name="title", default="", desc="title", readonly=True, visible=True, no_value="")
MetadataElement(name="description", default="", desc="description", readonly=True, visible=True, no_value="")
MetadataElement(name="url", default="", desc="url", readonly=True, visible=True, no_value="")
MetadataElement(name="doi", default="", desc="doi", readonly=True, visible=True, no_value="")
MetadataElement(name="loom_spec_version", default="", desc="loom_spec_version", readonly=True, visible=True, no_value="")
MetadataElement(name="creation_date", default=None, desc="creation_date", readonly=True, visible=True, no_value=None)
MetadataElement(name="shape", default=(), desc="shape", param=metadata.ListParameter, readonly=True, visible=True, no_value=())
MetadataElement(name="layers_count", default=0, desc="layers_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="layers_names", desc="layers_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="row_attrs_count", default=0, desc="row_attrs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="row_attrs_names", desc="row_attrs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="col_attrs_count", default=0, desc="col_attrs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="col_attrs_names", desc="col_attrs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="col_graphs_count", default=0, desc="col_graphs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="col_graphs_names", desc="col_graphs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="row_graphs_count", default=0, desc="row_graphs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="row_graphs_names", desc="row_graphs_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
def sniff(self, filename):
if super().sniff(filename):
with h5py.File(filename, 'r') as loom_file:
# Check the optional but distinctive LOOM_SPEC_VERSION attribute
if bool(loom_file.attrs.get('LOOM_SPEC_VERSION')):
return True
# Check some mandatory H5 datasets and groups
for el in ('matrix', 'row_attrs', 'col_attrs'):
if loom_file.get(el) is None:
return False
else:
return True
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary Loom file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary Loom file ({nice_size(dataset.get_size())})"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
with h5py.File(dataset.file_name, 'r') as loom_file:
dataset.metadata.title = loom_file.attrs.get('title')
dataset.metadata.description = loom_file.attrs.get('description')
dataset.metadata.url = loom_file.attrs.get('url')
dataset.metadata.doi = loom_file.attrs.get('doi')
loom_spec_version = loom_file.attrs.get('LOOM_SPEC_VERSION')
if isinstance(loom_spec_version, np.ndarray):
loom_spec_version = loom_spec_version[0]
if isinstance(loom_spec_version, bytes):
loom_spec_version = loom_spec_version.decode()
dataset.metadata.loom_spec_version = loom_spec_version
dataset.creation_date = loom_file.attrs.get('creation_date')
dataset.metadata.shape = tuple(loom_file['matrix'].shape)
tmp = list(loom_file.get('layers', {}).keys())
dataset.metadata.layers_count = len(tmp)
dataset.metadata.layers_names = tmp
tmp = list(loom_file['row_attrs'].keys())
dataset.metadata.row_attrs_count = len(tmp)
dataset.metadata.row_attrs_names = tmp
tmp = list(loom_file['col_attrs'].keys())
dataset.metadata.col_attrs_count = len(tmp)
dataset.metadata.col_attrs_names = tmp
# According to the Loom file format specification, col_graphs
# and row_graphs are mandatory groups, but files created by
# Bioconductor LoomExperiment do not always have them:
# https://github.com/Bioconductor/LoomExperiment/issues/7
tmp = list(loom_file.get('col_graphs', {}).keys())
dataset.metadata.col_graphs_count = len(tmp)
dataset.metadata.col_graphs_names = tmp
tmp = list(loom_file.get('row_graphs', {}).keys())
dataset.metadata.row_graphs_count = len(tmp)
dataset.metadata.row_graphs_names = tmp
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
class Anndata(H5):
"""
Class describing HDF5 AnnData files: http://anndata.rtfd.io
>>> from galaxy.datatypes.sniff import get_test_fname
>>> Anndata().sniff(get_test_fname('pbmc3k_tiny.h5ad'))
True
>>> Anndata().sniff(get_test_fname('test.mz5'))
False
>>> Anndata().sniff(get_test_fname('import.loom.krumsiek11.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_6_small2.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_6_small.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_7_4_small2.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_0_7_4_small.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_unk2.h5ad'))
True
>>> Anndata().sniff(get_test_fname('adata_unk.h5ad'))
True
"""
file_ext = 'h5ad'
MetadataElement(name="title", default="", desc="title", readonly=True, visible=True, no_value="")
MetadataElement(name="description", default="", desc="description", readonly=True, visible=True, no_value="")
MetadataElement(name="url", default="", desc="url", readonly=True, visible=True, no_value="")
MetadataElement(name="doi", default="", desc="doi", readonly=True, visible=True, no_value="")
MetadataElement(name="anndata_spec_version", default="", desc="anndata_spec_version", readonly=True, visible=True, no_value="")
MetadataElement(name="creation_date", default=None, desc="creation_date", readonly=True, visible=True, no_value=None)
MetadataElement(name="layers_count", default=0, desc="layers_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="layers_names", desc="layers_names", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="row_attrs_count", default=0, desc="row_attrs_count", readonly=True, visible=True, no_value=0)
# obs_names: Cell1, Cell2, Cell3,...
# obs_layers: louvain, leidein, isBcell
# obs_count: number of obs_layers
# obs_size: number of obs_names
MetadataElement(name="obs_names", desc="obs_names", default=[], multiple=True, readonly=True, no_value=None)
MetadataElement(name="obs_layers", desc="obs_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="obs_count", default=0, desc="obs_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="obs_size", default=-1, desc="obs_size", readonly=True, visible=True, no_value=0)
MetadataElement(name="obsm_layers", desc="obsm_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="obsm_count", default=0, desc="obsm_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="raw_var_layers", desc="raw_var_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="raw_var_count", default=0, desc="raw_var_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="raw_var_size", default=0, desc="raw_var_size", readonly=True, visible=True, no_value=0)
MetadataElement(name="var_layers", desc="var_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="var_count", default=0, desc="var_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="var_size", default=-1, desc="var_size", readonly=True, visible=True, no_value=0)
MetadataElement(name="varm_layers", desc="varm_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="varm_count", default=0, desc="varm_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="uns_layers", desc="uns_layers", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="uns_count", default=0, desc="uns_count", readonly=True, visible=True, no_value=0)
MetadataElement(name="shape", default=(-1, -1), desc="shape", param=metadata.ListParameter, readonly=True, visible=True, no_value=(0, 0))
def sniff(self, filename):
if super().sniff(filename):
try:
with h5py.File(filename, 'r') as f:
return all(attr in f for attr in ['X', 'obs', 'var'])
except Exception:
return False
return False
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
with h5py.File(dataset.file_name, 'r') as anndata_file:
dataset.metadata.title = anndata_file.attrs.get('title')
dataset.metadata.description = anndata_file.attrs.get('description')
dataset.metadata.url = anndata_file.attrs.get('url')
dataset.metadata.doi = anndata_file.attrs.get('doi')
dataset.creation_date = anndata_file.attrs.get('creation_date')
dataset.metadata.shape = anndata_file.attrs.get('shape', dataset.metadata.shape)
# none of the above appear to work in any dataset tested, but could be useful for
# future AnnData datasets
dataset.metadata.layers_count = len(anndata_file)
dataset.metadata.layers_names = list(anndata_file.keys())
def _layercountsize(tmp, lennames=0):
"From TMP and LENNAMES, return layers, their number, and the length of one of the layers (all equal)."
if hasattr(tmp, 'dtype'):
layers = list(tmp.dtype.names)
count = len(tmp.dtype)
size = int(tmp.size)
else:
layers = list(tmp.keys())
count = len(layers)
size = lennames
return (layers, count, size)
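            # Note: older AnnData writers (e.g. the 0.6.x test files above) store
            # obs/var as compound datasets, which is why _layercountsize inspects
            # tmp.dtype; newer writers use plain HDF5 groups keyed by column name.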
if 'obs' in dataset.metadata.layers_names:
tmp = anndata_file["obs"]
obs_index = None
if "index" in tmp:
obs_index = "index"
elif "_index" in tmp:
obs_index = "_index"
# Determine cell labels
if obs_index:
dataset.metadata.obs_names = list(tmp[obs_index])
elif hasattr(tmp, 'dtype'):
if "index" in tmp.dtype.names:
# Yes, we call tmp["index"], and not tmp.dtype["index"]
# here, despite the above tests.
dataset.metadata.obs_names = list(tmp["index"])
elif "_index" in tmp.dtype.names:
dataset.metadata.obs_names = list(tmp["_index"])
else:
log.warning("Could not determine cell labels for %s", self)
else:
log.warning("Could not determine observation index for %s", self)
x, y, z = _layercountsize(tmp, len(dataset.metadata.obs_names))
dataset.metadata.obs_layers = x
dataset.metadata.obs_count = y
dataset.metadata.obs_size = z
if 'obsm' in dataset.metadata.layers_names:
tmp = anndata_file["obsm"]
dataset.metadata.obsm_layers, dataset.metadata.obsm_count, _ = _layercountsize(tmp)
if 'raw.var' in dataset.metadata.layers_names:
tmp = anndata_file["raw.var"]
# full set of genes would never need to be previewed
# dataset.metadata.raw_var_names = tmp["index"]
x, y, z = _layercountsize(tmp, len(tmp["index"]))
dataset.metadata.raw_var_layers = x
dataset.metadata.raw_var_count = y
dataset.metadata.raw_var_size = z
if 'var' in dataset.metadata.layers_names:
tmp = anndata_file["var"]
var_index = None
if "index" in tmp:
var_index = "index"
elif "_index" in tmp:
var_index = "_index"
# We never use var_names
# dataset.metadata.var_names = tmp[var_index]
if var_index:
x, y, z = _layercountsize(tmp, len(tmp[var_index]))
else:
# failing to detect a var_index is not an indicator
# that the dataset is empty
x, y, z = _layercountsize(tmp)
dataset.metadata.var_layers = x
dataset.metadata.var_count = y
dataset.metadata.var_size = z
if 'varm' in dataset.metadata.layers_names:
tmp = anndata_file["varm"]
dataset.metadata.varm_layers, dataset.metadata.varm_count, _ = _layercountsize(tmp)
if 'uns' in dataset.metadata.layers_names:
tmp = anndata_file["uns"]
dataset.metadata.uns_layers, dataset.metadata.uns_count, _ = _layercountsize(tmp)
# Resolving the problematic shape parameter
if 'X' in dataset.metadata.layers_names:
# Shape we determine here due to the non-standard representation of 'X' dimensions
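                # Sparse encodings of 'X' record the matrix dimensions in a 'shape'
                # attribute, whereas dense datasets expose .shape directly, hence the
                # two lookups below.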
shape = anndata_file['X'].attrs.get("shape")
if shape is not None:
dataset.metadata.shape = tuple(shape)
elif hasattr(anndata_file['X'], 'shape'):
dataset.metadata.shape = tuple(anndata_file['X'].shape)
if dataset.metadata.shape is None:
dataset.metadata.shape = (int(dataset.metadata.obs_size), int(dataset.metadata.var_size))
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
tmp = dataset.metadata
def _makelayerstrings(layer, count, names):
"Format the layers."
if layer in tmp.layers_names:
return "\n[%s]: %d %s\n %s" % (
layer,
count,
"layer" if count == 1 else "layers",
', '.join(sorted(names))
)
return ""
peekstr = "[n_obs x n_vars]\n %d x %d" % tuple(tmp.shape)
peekstr += _makelayerstrings("obs", tmp.obs_count, tmp.obs_layers)
peekstr += _makelayerstrings("var", tmp.var_count, tmp.var_layers)
peekstr += _makelayerstrings("obsm", tmp.obsm_count, tmp.obsm_layers)
peekstr += _makelayerstrings("varm", tmp.varm_count, tmp.varm_layers)
peekstr += _makelayerstrings("uns", tmp.uns_count, tmp.uns_layers)
dataset.peek = peekstr
dataset.blurb = f"Anndata file ({nice_size(dataset.get_size())})"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary Anndata file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class GmxBinary(Binary):
"""
Base class for GROMACS binary files - xtc, trr, cpt
"""
magic_number: Optional[int] = None # variables to be overwritten in the child class
file_ext = ""
def sniff_prefix(self, sniff_prefix):
        # The first 4 bytes of any GROMACS binary file contain the magic number
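        # (read as a big-endian 32-bit integer; GROMACS writes these headers in
        # XDR, i.e. big-endian, byte order)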
return sniff_prefix.magic_header('>1i') == self.magic_number
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"Binary GROMACS {self.file_ext} file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary GROMACS {self.file_ext} trajectory file ({nice_size(dataset.get_size())})"
class Trr(GmxBinary):
"""
    Class describing a trr file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.trr')
>>> Trr().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Trr().sniff(fname)
False
"""
file_ext = "trr"
magic_number = 1993 # magic number reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/trrio.cpp
class Cpt(GmxBinary):
"""
Class describing a checkpoint (.cpt) file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.cpt')
>>> Cpt().sniff(fname)
True
>>> fname = get_test_fname('md.trr')
>>> Cpt().sniff(fname)
False
"""
file_ext = "cpt"
magic_number = 171817 # magic number reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/checkpoint.cpp
class Xtc(GmxBinary):
"""
Class describing an xtc file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.xtc')
>>> Xtc().sniff(fname)
True
>>> fname = get_test_fname('md.trr')
>>> Xtc().sniff(fname)
False
"""
file_ext = "xtc"
magic_number = 1995 # reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/xtcio.cpp
class Edr(GmxBinary):
"""
Class describing an edr file from the GROMACS suite
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('md.edr')
>>> Edr().sniff(fname)
True
>>> fname = get_test_fname('md.trr')
>>> Edr().sniff(fname)
False
"""
file_ext = "edr"
magic_number = -55555 # reference: https://github.com/gromacs/gromacs/blob/cec211b2c835ba6e8ea849fb1bf67d7fc19693a4/src/gromacs/fileio/enxio.cpp
class Biom2(H5):
"""
Class describing a biom2 file (http://biom-format.org/documentation/biom_format.html)
"""
MetadataElement(name="id", default=None, desc="table id", readonly=True, visible=True, no_value=None)
MetadataElement(name="format_url", default=None, desc="format-url", readonly=True, visible=True, no_value=None)
MetadataElement(name="format_version", default=None, desc="format-version", readonly=True, visible=True, no_value=None)
MetadataElement(name="format", default=None, desc="format", readonly=True, visible=True, no_value=None)
MetadataElement(name="type", default=None, desc="table type", readonly=True, visible=True, no_value=None)
MetadataElement(name="generated_by", default=None, desc="generated by", readonly=True, visible=True, no_value=None)
MetadataElement(name="creation_date", default=None, desc="creation date", readonly=True, visible=True, no_value=None)
MetadataElement(name="nnz", default=-1, desc="nnz: The number of non-zero elements in the table", readonly=True, visible=True, no_value=-1)
MetadataElement(name="shape", default=(), desc="shape: The number of rows and columns in the dataset", readonly=True, visible=True, no_value=())
file_ext = "biom2"
edam_format = "format_3746"
def sniff(self, filename):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom2')
>>> Biom2().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Biom2().sniff(fname)
False
>>> fname = get_test_fname('wiggle.wig')
>>> Biom2().sniff(fname)
False
"""
if super().sniff(filename):
with h5py.File(filename, 'r') as f:
required_fields = {'id', 'format-url', 'type', 'generated-by', 'creation-date', 'nnz', 'shape'}
return required_fields.issubset(f.attrs.keys())
return False
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
with h5py.File(dataset.file_name, 'r') as f:
attributes = f.attrs
dataset.metadata.id = util.unicodify(attributes['id'])
dataset.metadata.format_url = util.unicodify(attributes['format-url'])
if 'format-version' in attributes: # biom 2.1
dataset.metadata.format_version = '.'.join(str(_) for _ in attributes['format-version'])
elif 'format' in attributes: # biom 2.0
dataset.metadata.format = util.unicodify(attributes['format'])
dataset.metadata.type = util.unicodify(attributes['type'])
dataset.metadata.shape = tuple(int(_) for _ in attributes['shape'])
dataset.metadata.generated_by = util.unicodify(attributes['generated-by'])
dataset.metadata.creation_date = util.unicodify(attributes['creation-date'])
dataset.metadata.nnz = int(attributes['nnz'])
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, util.unicodify(e))
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = ['Biom2 (HDF5) file']
try:
with h5py.File(dataset.file_name) as f:
for k, v in f.attrs.items():
lines.append(f'{k}: {util.unicodify(v)}')
except Exception as e:
log.warning('%s, set_peek Exception: %s', self, util.unicodify(e))
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Biom2 (HDF5) file ({nice_size(dataset.get_size())})"
class Cool(H5):
"""
Class describing the cool format (https://github.com/mirnylab/cooler)
"""
file_ext = "cool"
def sniff(self, filename):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('matrix.cool')
>>> Cool().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Cool().sniff(fname)
False
>>> fname = get_test_fname('wiggle.wig')
>>> Cool().sniff(fname)
False
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom2')
>>> Cool().sniff(fname)
False
"""
MAGIC = "HDF5::Cooler"
URL = "https://github.com/mirnylab/cooler"
if super().sniff(filename):
keys = ['chroms', 'bins', 'pixels', 'indexes']
with h5py.File(filename, 'r') as handle:
fmt = util.unicodify(handle.attrs.get('format'))
url = util.unicodify(handle.attrs.get('format-url'))
if fmt == MAGIC or url == URL:
if not all(name in handle.keys() for name in keys):
return False
return True
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Cool (HDF5) file for storing genomic interaction data."
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Cool (HDF5) file ({nice_size(dataset.get_size())})."
class MCool(H5):
"""
Class describing the multi-resolution cool format (https://github.com/mirnylab/cooler)
"""
file_ext = "mcool"
def sniff(self, filename):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('matrix.mcool')
>>> MCool().sniff(fname)
True
>>> fname = get_test_fname('matrix.cool')
>>> MCool().sniff(fname)
False
>>> fname = get_test_fname('test.mz5')
>>> MCool().sniff(fname)
False
>>> fname = get_test_fname('wiggle.wig')
>>> MCool().sniff(fname)
False
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom2')
>>> MCool().sniff(fname)
False
"""
MAGIC = "HDF5::Cooler"
URL = "https://github.com/mirnylab/cooler"
if super().sniff(filename):
keys0 = ['resolutions']
with h5py.File(filename, 'r') as handle:
if not all(name in handle.keys() for name in keys0):
return False
res0 = next(iter(handle['resolutions'].keys()))
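                # Each resolution group in an .mcool is expected to be a complete
                # cooler data collection, so inspecting the first one is sufficient.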
keys = ['chroms', 'bins', 'pixels', 'indexes']
fmt = util.unicodify(handle['resolutions'][res0].attrs.get('format'))
url = util.unicodify(handle['resolutions'][res0].attrs.get('format-url'))
if fmt == MAGIC or url == URL:
if not all(name in handle['resolutions'][res0].keys() for name in keys):
return False
return True
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Multi-resolution Cool (HDF5) file for storing genomic interaction data."
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"MCool (HDF5) file ({nice_size(dataset.get_size())})."
class H5MLM(H5):
"""
Machine learning model generated by Galaxy-ML.
"""
file_ext = "h5mlm"
URL = "https://github.com/goeckslab/Galaxy-ML"
max_peek_size = 1000 # 1 KB
max_preview_size = 1000000 # 1 MB
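    # set_peek() truncates the model repr to max_peek_size for the history panel,
    # while display_data() caps the rendered JSON preview at max_preview_size.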
MetadataElement(name="hyper_params", desc="Hyperparameter File", param=FileParameter, file_ext="tabular", readonly=True, no_value=None, visible=False, optional=True)
def set_meta(self, dataset, overwrite=True, **kwd):
try:
spec_key = "hyper_params"
params_file = dataset.metadata.hyper_params
if not params_file:
params_file = dataset.metadata.spec[spec_key].param.new_file(dataset=dataset)
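            # Hyperparameters are stored as JSON under the '-model_hyperparameters-'
            # key and exported to a companion tabular file so they can be displayed.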
with h5py.File(dataset.file_name, "r") as handle:
hyper_params = handle["-model_hyperparameters-"][()]
hyper_params = json.loads(util.unicodify(hyper_params))
with open(params_file.file_name, "w") as f:
f.write("\tParameter\tValue\n")
for p in hyper_params:
f.write("\t".join(p) + "\n")
dataset.metadata.hyper_params = params_file
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
keys = ["-model_config-"]
with h5py.File(filename, "r") as handle:
if not all(name in handle.keys() for name in keys):
return False
url = util.unicodify(handle.attrs.get("-URL-"))
if url == self.URL:
return True
return False
def get_repr(self, filename):
try:
with h5py.File(filename, "r") as handle:
repr_ = util.unicodify(handle.attrs.get("-repr-"))
return repr_
except Exception as e:
log.warning('%s, get_repr Except: %s', self, e)
return ""
def get_config_string(self, filename):
try:
with h5py.File(filename, "r") as handle:
config = util.unicodify(handle["-model_config-"][()])
return config
except Exception as e:
log.warning('%s, get model configuration Except: %s', self, e)
return ""
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
repr_ = self.get_repr(dataset.file_name)
dataset.peek = repr_[:self.max_peek_size]
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "HDF5 Model (%s)" % (nice_size(dataset.get_size()))
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, **kwd):
preview = util.string_as_bool(preview)
if to_ext or not preview:
to_ext = to_ext or dataset.extension
return self._serve_raw(trans, dataset, to_ext, **kwd)
rval = {}
try:
with h5py.File(dataset.file_name, "r") as handle:
rval['Attributes'] = {}
attributes = handle.attrs
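                # '-URL-' is only used for sniffing and '-repr-' is rendered
                # separately below, so both are dropped from the attribute table.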
for k in (set(attributes.keys()) - {'-URL-', '-repr-'}):
rval['Attributes'][k] = util.unicodify(attributes.get(k))
except Exception as e:
log.warning(e)
config = self.get_config_string(dataset.file_name)
rval['Config'] = json.loads(config) if config else ''
rval = json.dumps(rval, sort_keys=True, indent=2)
rval = rval[:self.max_preview_size]
repr_ = self.get_repr(dataset.file_name)
return f"<pre>{repr_}</pre><pre>{rval}</pre>"
class HexrdMaterials(H5):
"""
Class describing a Hexrd Materials file: https://github.com/HEXRD/hexrd
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.materials.h5')
>>> HexrdMaterials().sniff(fname)
True
>>> fname = get_test_fname('test.loom')
>>> HexrdMaterials().sniff(fname)
False
"""
file_ext = "hexrd.materials.h5"
edam_format = "format_3590"
MetadataElement(name="materials", desc="materials", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None)
MetadataElement(name="SpaceGroupNumber", default={}, param=DictParameter, desc="SpaceGroupNumber", readonly=True, visible=True, no_value={})
MetadataElement(name="LatticeParameters", default={}, param=DictParameter, desc="LatticeParameters", readonly=True, visible=True, no_value={})
def sniff(self, filename):
if super().sniff(filename):
req = {'AtomData', 'Atomtypes', 'CrystalSystem', 'LatticeParameters'}
with h5py.File(filename, 'r') as mat_file:
for k in mat_file.keys():
if isinstance(mat_file[k], h5py._hl.group.Group) and set(mat_file[k].keys()) >= req:
return True
return False
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
with h5py.File(dataset.file_name, 'r') as mat_file:
dataset.metadata.materials = list(mat_file.keys())
sgn = dict()
lp = dict()
for m in mat_file.keys():
if 'SpaceGroupNumber' in mat_file[m] and len(mat_file[m]['SpaceGroupNumber']) > 0:
sgn[m] = mat_file[m]['SpaceGroupNumber'][0].item()
if 'LatticeParameters' in mat_file[m]:
lp[m] = mat_file[m]['LatticeParameters'][0:].tolist()
dataset.metadata.SpaceGroupNumber = sgn
dataset.metadata.LatticeParameters = lp
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = ['Material SpaceGroup Lattice']
if dataset.metadata.materials:
for m in dataset.metadata.materials:
try:
lines.append(f'{m} {dataset.metadata.SpaceGroupNumber[m]} {dataset.metadata.LatticeParameters[m]}')
except Exception:
continue
dataset.peek = '\n'.join(lines)
dataset.blurb = f"Materials: {' '.join(dataset.metadata.materials)}"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
class Scf(Binary):
"""Class describing an scf binary sequence file"""
edam_format = "format_1632"
edam_data = "data_0924"
file_ext = "scf"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary scf sequence file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary scf sequence file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class Sff(Binary):
""" Standard Flowgram Format (SFF) """
edam_format = "format_3284"
edam_data = "data_0924"
file_ext = "sff"
def sniff_prefix(self, sniff_prefix):
        # The first 4 bytes of any sff file are '.sff', and the file is binary. For details
# about the format, see http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=format
return sniff_prefix.startswith_bytes(b'.sff')
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary sff file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary sff file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class BigWig(Binary):
"""
Accessing binary BigWig files from UCSC.
The supplemental info in the paper has the binary details:
http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btq351v1
"""
edam_format = "format_3006"
edam_data = "data_3002"
file_ext = "bigwig"
track_type = "LineTrack"
data_sources = {"data_standalone": "bigwig"}
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = 0x888FFC26
self._name = "BigWig"
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.magic_header("I") == self._magic
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"Binary UCSC {self._name} file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary UCSC {self._name} file ({nice_size(dataset.get_size())})"
class BigBed(BigWig):
"""BigBed support from UCSC."""
edam_format = "format_3004"
edam_data = "data_3002"
file_ext = "bigbed"
data_sources = {"data_standalone": "bigbed"}
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
self._magic = 0x8789F2EB
self._name = "BigBed"
@build_sniff_from_prefix
class TwoBit(Binary):
"""Class describing a TwoBit format nucleotide file"""
edam_format = "format_3009"
edam_data = "data_0848"
file_ext = "twobit"
def sniff_prefix(self, sniff_prefix):
magic = sniff_prefix.magic_header(">L")
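        # TWOBIT_MAGIC_NUMBER_SWAP is the byte-swapped form of the magic, so files
        # written with either byte order are recognised.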
return magic == TWOBIT_MAGIC_NUMBER or magic == TWOBIT_MAGIC_NUMBER_SWAP
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary TwoBit format nucleotide file"
dataset.blurb = nice_size(dataset.get_size())
else:
return super().set_peek(dataset)
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary TwoBit format nucleotide file ({nice_size(dataset.get_size())})"
@dataproviders.decorators.has_dataproviders
class SQlite(Binary):
"""Class describing a Sqlite database """
MetadataElement(name="tables", default=[], param=ListParameter, desc="Database Tables", readonly=True, visible=True, no_value=[])
MetadataElement(name="table_columns", default={}, param=DictParameter, desc="Database Table Columns", readonly=True, visible=True, no_value={})
MetadataElement(name="table_row_count", default={}, param=DictParameter, desc="Database Table Row Count", readonly=True, visible=True, no_value={})
file_ext = "sqlite"
edam_format = "format_3621"
def init_meta(self, dataset, copy_from=None):
Binary.init_meta(self, dataset, copy_from=copy_from)
def set_meta(self, dataset, overwrite=True, **kwd):
try:
tables = []
columns = dict()
rowcounts = dict()
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT name,sql FROM sqlite_master WHERE type='table' ORDER BY name"
rslt = c.execute(tables_query).fetchall()
for table, _ in rslt:
tables.append(table)
try:
col_query = f'SELECT * FROM {table} LIMIT 0'
cur = conn.cursor().execute(col_query)
cols = [col[0] for col in cur.description]
columns[table] = cols
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
for table in tables:
try:
row_query = f"SELECT count(*) FROM {table}"
rowcounts[table] = c.execute(row_query).fetchone()[0]
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
dataset.metadata.tables = tables
dataset.metadata.table_columns = columns
dataset.metadata.table_row_count = rowcounts
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
def sniff(self, filename):
        # The first 16 bytes of any SQLite3 database file are 'SQLite format 3\0', and the file is binary. For details
# about the format, see http://www.sqlite.org/fileformat.html
        try:
            with open(filename, 'rb') as fh:
                header = fh.read(16)
            return header == b'SQLite format 3\0'
        except Exception:
            return False
def sniff_table_names(self, filename, table_names):
# All table names should be in the schema
try:
conn = sqlite.connect(filename)
c = conn.cursor()
tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
result = c.execute(tables_query).fetchall()
result = [_[0] for _ in result]
for table_name in table_names:
if table_name not in result:
return False
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "SQLite Database"
lines = ['SQLite Database']
if dataset.metadata.tables:
for table in dataset.metadata.tables:
try:
lines.append(f'{table} [{dataset.metadata.table_row_count[table]}]')
except Exception:
continue
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"SQLite Database ({nice_size(dataset.get_size())})"
@dataproviders.decorators.dataprovider_factory('sqlite', dataproviders.dataset.SQliteDataProvider.settings)
def sqlite_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory('sqlite-table', dataproviders.dataset.SQliteDataTableProvider.settings)
def sqlite_datatableprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataTableProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory('sqlite-dict', dataproviders.dataset.SQliteDataDictProvider.settings)
def sqlite_datadictprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataDictProvider(dataset_source, **settings)
class GeminiSQLite(SQlite):
"""Class describing a Gemini Sqlite database """
MetadataElement(name="gemini_version", default='0.10.0', param=MetadataParameter, desc="Gemini Version",
readonly=True, visible=True, no_value='0.10.0')
file_ext = "gemini.sqlite"
edam_format = "format_3622"
edam_data = "data_3498"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT version FROM version"
result = c.execute(tables_query).fetchall()
for version, in result:
dataset.metadata.gemini_version = version
# TODO: Can/should we detect even more attributes, such as use of PED file, what was input annotation type, etc.
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["gene_detailed", "gene_summary", "resources", "sample_genotype_counts",
"sample_genotypes", "samples", "variant_impacts", "variants", "version"]
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Gemini SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Gemini SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
class ChiraSQLite(SQlite):
"""Class describing a ChiRAViz Sqlite database """
file_ext = "chira.sqlite"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
        if super().sniff(filename):
            return self.sniff_table_names(filename, ['Chimeras'])
        return False
class CuffDiffSQlite(SQlite):
"""Class describing a CuffDiff SQLite database """
MetadataElement(name="cuffdiff_version", default='2.2.1', param=MetadataParameter, desc="CuffDiff Version",
readonly=True, visible=True, no_value='2.2.1')
MetadataElement(name="genes", default=[], param=MetadataParameter, desc="Genes",
readonly=True, visible=True, no_value=[])
MetadataElement(name="samples", default=[], param=MetadataParameter, desc="Samples",
readonly=True, visible=True, no_value=[])
file_ext = "cuffdiff.sqlite"
# TODO: Update this when/if there is a specific EDAM format for CuffDiff SQLite data.
edam_format = "format_3621"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
genes = []
samples = []
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT value FROM runInfo where param = 'version'"
result = c.execute(tables_query).fetchall()
for version, in result:
dataset.metadata.cuffdiff_version = version
genes_query = 'SELECT gene_id, gene_short_name FROM genes ORDER BY gene_short_name'
result = c.execute(genes_query).fetchall()
for gene_id, gene_name in result:
if gene_name is None:
continue
gene = f'{gene_id}: {gene_name}'
if gene not in genes:
genes.append(gene)
samples_query = 'SELECT DISTINCT(sample_name) as sample_name FROM samples ORDER BY sample_name'
result = c.execute(samples_query).fetchall()
for sample_name, in result:
if sample_name not in samples:
samples.append(sample_name)
dataset.metadata.genes = genes
dataset.metadata.samples = samples
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
# These tables should be in any CuffDiff SQLite output.
table_names = ['CDS', 'genes', 'isoforms', 'replicates', 'runInfo', 'samples', 'TSS']
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "CuffDiff SQLite Database, version %s" % (dataset.metadata.cuffdiff_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "CuffDiff SQLite Database, version %s" % (dataset.metadata.cuffdiff_version or 'unknown')
class MzSQlite(SQlite):
"""Class describing a Proteomics Sqlite database """
file_ext = "mz.sqlite"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["DBSequence", "Modification", "Peaks", "Peptide", "PeptideEvidence",
"Score", "SearchDatabase", "Source", "SpectraData", "Spectrum", "SpectrumIdentification"]
return self.sniff_table_names(filename, table_names)
return False
class PQP(SQlite):
"""
Class describing a Peptide query parameters file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.pqp')
>>> PQP().sniff(fname)
True
>>> fname = get_test_fname('test.osw')
>>> PQP().sniff(fname)
False
"""
file_ext = "pqp"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
"""
table definition according to https://github.com/grosenberger/OpenMS/blob/develop/src/openms/source/ANALYSIS/OPENSWATH/TransitionPQPFile.cpp#L264
        For now VERSION, GENE and PEPTIDE_GENE_MAPPING are excluded, since
        there is test data without these tables; see also https://github.com/OpenMS/OpenMS/issues/4365
"""
if not super().sniff(filename):
return False
table_names = ['COMPOUND', 'PEPTIDE', 'PEPTIDE_PROTEIN_MAPPING', 'PRECURSOR',
'PRECURSOR_COMPOUND_MAPPING', 'PRECURSOR_PEPTIDE_MAPPING', 'PROTEIN',
'TRANSITION', 'TRANSITION_PEPTIDE_MAPPING', 'TRANSITION_PRECURSOR_MAPPING']
osw_table_names = ['FEATURE', 'FEATURE_MS1', 'FEATURE_MS2', 'FEATURE_TRANSITION', 'RUN']
return self.sniff_table_names(filename, table_names) and not self.sniff_table_names(filename, osw_table_names)
class OSW(SQlite):
"""
Class describing OpenSwath output
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.osw')
>>> OSW().sniff(fname)
True
>>> fname = get_test_fname('test.sqmass')
>>> OSW().sniff(fname)
False
"""
file_ext = "osw"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
        # osw seems to be an extension of pqp (a few tables are added)
# see also here https://github.com/OpenMS/OpenMS/issues/4365
if not super().sniff(filename):
return False
table_names = ['COMPOUND', 'PEPTIDE', 'PEPTIDE_PROTEIN_MAPPING', 'PRECURSOR',
'PRECURSOR_COMPOUND_MAPPING', 'PRECURSOR_PEPTIDE_MAPPING', 'PROTEIN',
'TRANSITION', 'TRANSITION_PEPTIDE_MAPPING', 'TRANSITION_PRECURSOR_MAPPING',
'FEATURE', 'FEATURE_MS1', 'FEATURE_MS2', 'FEATURE_TRANSITION', 'RUN']
return self.sniff_table_names(filename, table_names)
class SQmass(SQlite):
"""
Class describing a Sqmass database
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.sqmass')
>>> SQmass().sniff(fname)
True
>>> fname = get_test_fname('test.pqp')
>>> SQmass().sniff(fname)
False
"""
file_ext = "sqmass"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["CHROMATOGRAM", "PRECURSOR", "RUN", "SPECTRUM", "DATA", "PRODUCT", "RUN_EXTRA"]
return self.sniff_table_names(filename, table_names)
return False
class BlibSQlite(SQlite):
"""Class describing a Proteomics Spectral Library Sqlite database """
MetadataElement(name="blib_version", default='1.8', param=MetadataParameter, desc="Blib Version",
readonly=True, visible=True, no_value='1.8')
file_ext = "blib"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT majorVersion,minorVersion FROM LibInfo"
(majorVersion, minorVersion) = c.execute(tables_query).fetchall()[0]
dataset.metadata.blib_version = f'{majorVersion}.{minorVersion}'
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ['IonMobilityTypes', 'LibInfo', 'Modifications', 'RefSpectra',
'RefSpectraPeakAnnotations', 'RefSpectraPeaks', 'ScoreTypes', 'SpectrumSourceFiles']
return self.sniff_table_names(filename, table_names)
return False
class DlibSQlite(SQlite):
"""
Class describing a Proteomics Spectral Library Sqlite database
DLIBs only have the "entries", "metadata", and "peptidetoprotein" tables populated.
ELIBs have the rest of the tables populated too, such as "peptidequants" or "peptidescores".
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.dlib')
>>> DlibSQlite().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> DlibSQlite().sniff(fname)
False
"""
MetadataElement(name="dlib_version", default='1.8', param=MetadataParameter, desc="Dlib Version",
readonly=True, visible=True, no_value='1.8')
file_ext = "dlib"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT Value FROM metadata WHERE Key = 'version'"
            version = c.execute(tables_query).fetchall()[0][0]
            dataset.metadata.dlib_version = f'{version}'
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ['entries', 'metadata', 'peptidetoprotein']
return self.sniff_table_names(filename, table_names)
return False
class ElibSQlite(SQlite):
"""
    Class describing a Proteomics Chromatogram Library Sqlite database
DLIBs only have the "entries", "metadata", and "peptidetoprotein" tables populated.
ELIBs have the rest of the tables populated too, such as "peptidequants" or "peptidescores".
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.elib')
>>> ElibSQlite().sniff(fname)
True
>>> fname = get_test_fname('test.dlib')
>>> ElibSQlite().sniff(fname)
False
"""
MetadataElement(name="version", default='0.1.14', param=MetadataParameter, desc="Elib Version",
readonly=True, visible=True, no_value='0.1.14')
file_ext = "elib"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT Value FROM metadata WHERE Key = 'version'"
            version = c.execute(tables_query).fetchall()[0][0]
            dataset.metadata.version = f'{version}'
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = ['entries', 'fragmentquants', 'metadata', 'peptidelocalizations', 'peptidequants',
'peptidescores', 'peptidetoprotein', 'proteinscores', 'retentiontimes']
if self.sniff_table_names(filename, table_names):
try:
conn = sqlite.connect(filename)
c = conn.cursor()
row_query = "SELECT count(*) FROM peptidescores"
count = c.execute(row_query).fetchone()[0]
return int(count) > 0
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
class IdpDB(SQlite):
"""
Class describing an IDPicker 3 idpDB (sqlite) database
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.idpdb')
>>> IdpDB().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> IdpDB().sniff(fname)
False
"""
file_ext = "idpdb"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super().sniff(filename):
table_names = ["About", "Analysis", "AnalysisParameter", "PeptideSpectrumMatch",
"Spectrum", "SpectrumSource"]
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "IDPickerDB SQLite file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"IDPickerDB SQLite file ({nice_size(dataset.get_size())})"
class GAFASQLite(SQlite):
"""Class describing a GAFA SQLite database"""
MetadataElement(name='gafa_schema_version', default='0.3.0', param=MetadataParameter, desc='GAFA schema version',
readonly=True, visible=True, no_value='0.3.0')
file_ext = 'gafa.sqlite'
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
version_query = 'SELECT version FROM meta'
results = c.execute(version_query).fetchall()
if len(results) == 0:
raise Exception('version not found in meta table')
elif len(results) > 1:
raise Exception('Multiple versions found in meta table')
dataset.metadata.gafa_schema_version = results[0][0]
except Exception as e:
log.warning("%s, set_meta Exception: %s", self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = frozenset({'gene', 'gene_family', 'gene_family_member', 'meta', 'transcript'})
return self.sniff_table_names(filename, table_names)
return False
class NcbiTaxonomySQlite(SQlite):
"""Class describing the NCBI Taxonomy database stored in SQLite as done by rust-ncbitaxonomy"""
MetadataElement(name='ncbitaxonomy_schema_version', default='20200501095116', param=MetadataParameter, desc='ncbitaxonomy schema version',
readonly=True, visible=True, no_value='20200501095116')
MetadataElement(name="taxon_count", default=[], param=MetadataParameter, desc="Count of taxa in the taxonomy",
readonly=True, visible=True, no_value=[])
file_ext = 'ncbitaxonomy.sqlite'
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
version_query = 'SELECT version FROM __diesel_schema_migrations ORDER BY run_on DESC LIMIT 1'
results = c.execute(version_query).fetchall()
if len(results) == 0:
raise Exception('version not found in __diesel_schema_migrations table')
dataset.metadata.ncbitaxonomy_schema_version = results[0][0]
taxons_query = 'SELECT count(name) FROM taxonomy'
results = c.execute(taxons_query).fetchall()
if len(results) == 0:
raise Exception('could not count size of taxonomy table')
dataset.metadata.taxon_count = results[0][0]
except Exception as e:
log.warning("%s, set_meta Exception: %s", self, e)
def sniff(self, filename):
if super().sniff(filename):
table_names = frozenset({'__diesel_schema_migrations', 'taxonomy'})
return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "NCBI Taxonomy SQLite Database, version {} ({} taxons)".format(
getattr(dataset.metadata, "ncbitaxonomy_schema_version", "unknown"),
getattr(dataset.metadata, "taxon_count", "unknown")
)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "NCBI Taxonomy SQLite Database, version {} ({} taxons)".format(
getattr(dataset.metadata, "ncbitaxonomy_schema_version", "unknown"),
getattr(dataset.metadata, "taxon_count", "unknown")
)
class Xlsx(Binary):
"""Class for Excel 2007 (xlsx) files"""
file_ext = "xlsx"
compressed = True
    def sniff(self, filename):
        # Xlsx is compressed in zip format and must not be uncompressed in Galaxy.
        try:
            if zipfile.is_zipfile(filename):
                with zipfile.ZipFile(filename) as tempzip:
                    if "[Content_Types].xml" in tempzip.namelist() and tempzip.read("[Content_Types].xml").find(b'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml') != -1:
                        return True
            return False
        except Exception:
            return False
class ExcelXls(Binary):
"""Class describing an Excel (xls) file"""
file_ext = "excel.xls"
edam_format = "format_3468"
def sniff(self, filename):
mime_type = subprocess.check_output(['file', '--mime-type', filename])
return b"application/vnd.ms-excel" in mime_type
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'application/vnd.ms-excel'
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Microsoft Excel XLS file"
dataset.blurb = data.nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Microsoft Excel XLS file ({data.nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class Sra(Binary):
""" Sequence Read Archive (SRA) datatype originally from mdshw5/sra-tools-galaxy"""
file_ext = 'sra'
def sniff_prefix(self, sniff_prefix):
""" The first 8 bytes of any NCBI sra file is 'NCBI.sra', and the file is binary.
For details about the format, see http://www.ncbi.nlm.nih.gov/books/n/helpsra/SRA_Overview_BK/#SRA_Overview_BK.4_SRA_Data_Structure
"""
return sniff_prefix.startswith_bytes(b'NCBI.sra')
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = 'Binary sra file'
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f'Binary sra file ({nice_size(dataset.get_size())})'
class RData(Binary):
"""Generic R Data file datatype implementation"""
file_ext = 'rdata'
    def sniff(self, filename):
        rdata_header = b'RDX2\nX\n'
        try:
            with open(filename, 'rb') as fh:
                header = fh.read(7)
            if header == rdata_header:
                return True
            with gzip.open(filename) as fh:
                header = fh.read(7)
            return header == rdata_header
        except Exception:
            return False
class OxliBinary(Binary):
@staticmethod
def _sniff(filename, oxlitype):
try:
with open(filename, 'rb') as fileobj:
header = fileobj.read(4)
if header == b'OXLI':
fileobj.read(1) # skip the version number
ftype = fileobj.read(1)
if binascii.hexlify(ftype) == oxlitype:
return True
return False
except OSError:
return False
class OxliCountGraph(OxliBinary):
"""
OxliCountGraph starts with "OXLI" + one byte version number +
8-bit binary '1'
Test file generated via::
load-into-counting.py --n_tables 1 --max-tablesize 1 \\
oxli_countgraph.oxlicg khmer/tests/test-data/100-reads.fq.bz2
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliCountGraph().sniff(fname)
False
>>> fname = get_test_fname("oxli_countgraph.oxlicg")
>>> OxliCountGraph().sniff(fname)
True
"""
file_ext = 'oxlicg'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"01")
class OxliNodeGraph(OxliBinary):
"""
OxliNodeGraph starts with "OXLI" + one byte version number +
8-bit binary '2'
Test file generated via::
load-graph.py --n_tables 1 --max-tablesize 1 oxli_nodegraph.oxling \\
khmer/tests/test-data/100-reads.fq.bz2
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliNodeGraph().sniff(fname)
False
>>> fname = get_test_fname("oxli_nodegraph.oxling")
>>> OxliNodeGraph().sniff(fname)
True
"""
file_ext = 'oxling'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"02")
class OxliTagSet(OxliBinary):
"""
OxliTagSet starts with "OXLI" + one byte version number +
8-bit binary '3'
Test file generated via::
load-graph.py --n_tables 1 --max-tablesize 1 oxli_nodegraph.oxling \\
khmer/tests/test-data/100-reads.fq.bz2;
mv oxli_nodegraph.oxling.tagset oxli_tagset.oxlits
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliTagSet().sniff(fname)
False
>>> fname = get_test_fname("oxli_tagset.oxlits")
>>> OxliTagSet().sniff(fname)
True
"""
file_ext = 'oxlits'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"03")
class OxliStopTags(OxliBinary):
"""
OxliStopTags starts with "OXLI" + one byte version number +
8-bit binary '4'
Test file adapted from khmer 2.0's
"khmer/tests/test-data/goodversion-k32.stoptags"
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliStopTags().sniff(fname)
False
>>> fname = get_test_fname("oxli_stoptags.oxlist")
>>> OxliStopTags().sniff(fname)
True
"""
file_ext = 'oxlist'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"04")
class OxliSubset(OxliBinary):
"""
OxliSubset starts with "OXLI" + one byte version number +
8-bit binary '5'
Test file generated via::
load-graph.py -k 20 example tests/test-data/random-20-a.fa;
partition-graph.py example;
mv example.subset.0.pmap oxli_subset.oxliss
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliSubset().sniff(fname)
False
>>> fname = get_test_fname("oxli_subset.oxliss")
>>> OxliSubset().sniff(fname)
True
"""
file_ext = 'oxliss'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"05")
class OxliGraphLabels(OxliBinary):
"""
OxliGraphLabels starts with "OXLI" + one byte version number +
8-bit binary '6'
Test file generated via::
python -c "from khmer import GraphLabels; \\
gl = GraphLabels(20, 1e7, 4); \\
gl.consume_fasta_and_tag_with_labels('tests/test-data/test-labels.fa'); \\
gl.save_labels_and_tags('oxli_graphlabels.oxligl')"
using khmer 2.0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sequence.csfasta')
>>> OxliGraphLabels().sniff(fname)
False
>>> fname = get_test_fname("oxli_graphlabels.oxligl")
>>> OxliGraphLabels().sniff(fname)
True
"""
file_ext = 'oxligl'
def sniff(self, filename):
return OxliBinary._sniff(filename, b"06")
class PostgresqlArchive(CompressedArchive):
"""
Class describing a Postgresql database packed into a tar archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('postgresql_fake.tar.bz2')
>>> PostgresqlArchive().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar')
>>> PostgresqlArchive().sniff(fname)
False
"""
MetadataElement(name="version", default=None, param=MetadataParameter, desc="PostgreSQL database version",
readonly=True, visible=True, no_value=None)
file_ext = "postgresql"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
if dataset and tarfile.is_tarfile(dataset.file_name):
with tarfile.open(dataset.file_name, 'r') as temptar:
pg_version_file = temptar.extractfile('postgresql/db/PG_VERSION')
dataset.metadata.version = util.unicodify(pg_version_file.read()).strip()
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, util.unicodify(e))
def sniff(self, filename):
if filename and tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as temptar:
return 'postgresql/db/PG_VERSION' in temptar.getnames()
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"PostgreSQL Archive ({nice_size(dataset.get_size())})"
dataset.blurb = "PostgreSQL version %s" % (dataset.metadata.version or 'unknown')
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"PostgreSQL Archive ({nice_size(dataset.get_size())})"
class Fast5Archive(CompressedArchive):
"""
Class describing a FAST5 archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.fast5.tar')
>>> Fast5Archive().sniff(fname)
True
"""
MetadataElement(name="fast5_count", default='0', param=MetadataParameter, desc="Read Count",
readonly=True, visible=True, no_value=None)
file_ext = "fast5.tar"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
if dataset and tarfile.is_tarfile(dataset.file_name):
with tarfile.open(dataset.file_name, 'r') as temptar:
dataset.metadata.fast5_count = sum(
1 for f in temptar if f.name.endswith('.fast5')
)
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
try:
if filename and tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as temptar:
for f in temptar:
if not f.isfile():
continue
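                        # The first regular member decides: a FAST5 archive is expected
                        # to contain only .fast5 files, so anything else means no match.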
if f.name.endswith('.fast5'):
return True
else:
return False
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"FAST5 Archive ({nice_size(dataset.get_size())})"
dataset.blurb = "%s sequences" % (dataset.metadata.fast5_count or 'unknown')
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"FAST5 Archive ({nice_size(dataset.get_size())})"
class Fast5ArchiveGz(Fast5Archive):
"""
Class describing a gzip-compressed FAST5 archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.fast5.tar.gz')
>>> Fast5ArchiveGz().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar.bz2')
>>> Fast5ArchiveGz().sniff(fname)
False
>>> fname = get_test_fname('test.fast5.tar')
>>> Fast5ArchiveGz().sniff(fname)
False
"""
file_ext = "fast5.tar.gz"
def sniff(self, filename):
if not is_gzip(filename):
return False
return Fast5Archive.sniff(self, filename)
class Fast5ArchiveBz2(Fast5Archive):
"""
Class describing a bzip2-compressed FAST5 archive
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.fast5.tar.bz2')
>>> Fast5ArchiveBz2().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar.gz')
>>> Fast5ArchiveBz2().sniff(fname)
False
>>> fname = get_test_fname('test.fast5.tar')
>>> Fast5ArchiveBz2().sniff(fname)
False
"""
file_ext = "fast5.tar.bz2"
def sniff(self, filename):
if not is_bz2(filename):
return False
return Fast5Archive.sniff(self, filename)
class SearchGuiArchive(CompressedArchive):
"""Class describing a SearchGUI archive """
MetadataElement(name="searchgui_version", default='1.28.0', param=MetadataParameter, desc="SearchGui Version",
readonly=True, visible=True, no_value=None)
MetadataElement(name="searchgui_major_version", default='1', param=MetadataParameter, desc="SearchGui Major Version",
readonly=True, visible=True, no_value=None)
file_ext = "searchgui_archive"
def set_meta(self, dataset, overwrite=True, **kwd):
super().set_meta(dataset, overwrite=overwrite, **kwd)
try:
if dataset and zipfile.is_zipfile(dataset.file_name):
with zipfile.ZipFile(dataset.file_name) as tempzip:
if 'searchgui.properties' in tempzip.namelist():
with tempzip.open('searchgui.properties') as fh:
for line in io.TextIOWrapper(fh):
if line.startswith('searchgui.version'):
version = line.split('=')[1].strip()
dataset.metadata.searchgui_version = version
dataset.metadata.searchgui_major_version = version.split('.')[0]
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
try:
if filename and zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename, 'r') as tempzip:
is_searchgui = 'searchgui.properties' in tempzip.namelist()
return is_searchgui
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "SearchGUI Archive, version %s" % (dataset.metadata.searchgui_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "SearchGUI Archive, version %s" % (dataset.metadata.searchgui_version or 'unknown')
@build_sniff_from_prefix
class NetCDF(Binary):
"""Binary data in netCDF format"""
file_ext = "netcdf"
edam_format = "format_3650"
edam_data = "data_0943"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary netCDF file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary netCDF file ({nice_size(dataset.get_size())})"
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(b'CDF')
class Dcd(Binary):
"""
Class describing a dcd file from the CHARMM molecular simulation program
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test_glucose_vacuum.dcd')
>>> Dcd().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Dcd().sniff(fname)
False
"""
file_ext = "dcd"
edam_data = "data_3842"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic_number = b'CORD'
def sniff(self, filename):
# Match the keyword 'CORD' at position 4 or 8 - intsize dependent
# Not checking for endianness
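        # DCD is a Fortran unformatted binary: the first record is preceded by a
        # 4-byte (or, on some builds, 8-byte) record-length marker, so 'CORD' sits
        # at offset 4 or 8.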
try:
with open(filename, 'rb') as header:
intsize = 4
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
else:
intsize = 8
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary CHARMM/NAMD dcd file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary CHARMM/NAMD dcd file ({nice_size(dataset.get_size())})"
class Vel(Binary):
"""
Class describing a velocity file from the CHARMM molecular simulation program
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test_charmm.vel')
>>> Vel().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Vel().sniff(fname)
False
"""
file_ext = "vel"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic_number = b'VELD'
def sniff(self, filename):
# Match the keyword 'VELD' at position 4 or 8 - intsize dependent
# Not checking for endianness
try:
with open(filename, 'rb') as header:
intsize = 4
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
else:
intsize = 8
header.seek(intsize)
if header.read(intsize) == self._magic_number:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary CHARMM velocity file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"Binary CHARMM velocity file ({nice_size(dataset.get_size())})"
@build_sniff_from_prefix
class DAA(Binary):
"""
    Class describing a DAA (diamond alignment archive) file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('diamond.daa')
>>> DAA().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> DAA().sniff(fname)
False
"""
file_ext = "daa"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("6be33e6d47530e3c")
def sniff_prefix(self, sniff_prefix):
# The first 8 bytes of any daa file are 0x3c0e53476d3ee36b
return sniff_prefix.startswith_bytes(self._magic)
@build_sniff_from_prefix
class RMA6(Binary):
"""
Class describing an RMA6 (MEGAN6 read-match archive) file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('diamond.rma6')
>>> RMA6().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> RMA6().sniff(fname)
False
"""
file_ext = "rma6"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("000003f600000006")
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(self._magic)
@build_sniff_from_prefix
class DMND(Binary):
"""
    Class describing a DMND file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('diamond_db.dmnd')
>>> DMND().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> DMND().sniff(fname)
False
"""
file_ext = "dmnd"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("6d18ee15a4f84a02")
def sniff_prefix(self, sniff_prefix):
# The first 8 bytes of any dmnd file are 0x24af8a415ee186d
return sniff_prefix.startswith_bytes(self._magic)
class ICM(Binary):
"""
Class describing an ICM (interpolated context model) file, used by Glimmer
"""
file_ext = "icm"
edam_data = "data_0950"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary ICM (interpolated context model) file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
    def sniff(self, filename):
        with open(filename) as fh:
            line = fh.read(100)
        if '>ver = ' in line and 'len = ' in line and 'depth = ' in line and 'periodicity =' in line and 'nodes = ' in line:
            return True
        return False
@build_sniff_from_prefix
class Parquet(Binary):
"""
    Class describing an Apache Parquet file (https://parquet.apache.org/)
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('example.parquet')
>>> Parquet().sniff(fname)
True
>>> fname = get_test_fname('test.mz5')
>>> Parquet().sniff(fname)
False
"""
file_ext = "parquet"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = b"PAR1" # Defined at https://parquet.apache.org/documentation/latest/
def sniff_prefix(self, sniff_prefix):
return sniff_prefix.startswith_bytes(self._magic)
class BafTar(CompressedArchive):
"""
Base class for common behavior of tar files of directory-based raw file formats
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('brukerbaf.d.tar')
>>> BafTar().sniff(fname)
True
>>> fname = get_test_fname('test.fast5.tar')
>>> BafTar().sniff(fname)
False
"""
edam_data = "data_2536" # mass spectrometry data
edam_format = "format_3712" # TODO: add more raw formats to EDAM?
file_ext = "brukerbaf.d.tar"
def get_signature_file(self):
return "analysis.baf"
def sniff(self, filename):
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as rawtar:
return self.get_signature_file() in [os.path.basename(f).lower() for f in rawtar.getnames()]
return False
def get_type(self):
return "Bruker BAF directory archive"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = self.get_type()
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return f"{self.get_type()} ({nice_size(dataset.get_size())})"
class YepTar(BafTar):
""" A tar'd up .d directory containing Agilent/Bruker YEP format data """
file_ext = "agilentbrukeryep.d.tar"
def get_signature_file(self):
return "analysis.yep"
def get_type(self):
return "Agilent/Bruker YEP directory archive"
class TdfTar(BafTar):
""" A tar'd up .d directory containing Bruker TDF format data """
file_ext = "brukertdf.d.tar"
def get_signature_file(self):
return "analysis.tdf"
def get_type(self):
return "Bruker TDF directory archive"
class MassHunterTar(BafTar):
""" A tar'd up .d directory containing Agilent MassHunter format data """
file_ext = "agilentmasshunter.d.tar"
def get_signature_file(self):
return "msscan.bin"
def get_type(self):
return "Agilent MassHunter directory archive"
class MassLynxTar(BafTar):
""" A tar'd up .d directory containing Waters MassLynx format data """
file_ext = "watersmasslynx.raw.tar"
def get_signature_file(self):
return "_func001.dat"
def get_type(self):
return "Waters MassLynx RAW directory archive"
class WiffTar(BafTar):
"""
A tar'd up .wiff/.scan pair containing Sciex WIFF format data
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('some.wiff.tar')
>>> WiffTar().sniff(fname)
True
>>> fname = get_test_fname('brukerbaf.d.tar')
>>> WiffTar().sniff(fname)
False
>>> fname = get_test_fname('test.fast5.tar')
>>> WiffTar().sniff(fname)
False
"""
file_ext = "wiff.tar"
def sniff(self, filename):
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as rawtar:
return ".wiff" in [os.path.splitext(os.path.basename(f).lower())[1] for f in rawtar.getnames()]
return False
def get_type(self):
return "Sciex WIFF/SCAN archive"
@build_sniff_from_prefix
class Pretext(Binary):
"""
PretextMap contact map file
Try to guess if the file is a Pretext file.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('sample.pretext')
>>> Pretext().sniff(fname)
True
"""
file_ext = "pretext"
def sniff_prefix(self, sniff_prefix):
# The first 4 bytes of any pretext file is 'pstm', and the rest of the
# file contains binary data.
return sniff_prefix.startswith_bytes(b'pstm')
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary pretext file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary pretext file (%s)" % (nice_size(dataset.get_size()))
class JP2(Binary):
"""
JPEG 2000 binary image format
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.jp2')
>>> JP2().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> JP2().sniff(fname)
False
"""
file_ext = "jp2"
def __init__(self, **kwd):
super().__init__(**kwd)
self._magic = binascii.unhexlify("0000000C6A5020200D0A870A")
def sniff(self, filename):
# The first 12 bytes of any jp2 file are 0000000C6A5020200D0A870A
try:
            with open(filename, 'rb') as fh:
                header = fh.read(12)
            return header == self._magic
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary JPEG 2000 file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary JPEG 2000 file (%s)" % (nice_size(dataset.get_size()))
class Npz(CompressedArchive):
"""
    Class describing a NumPy NPZ file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.images.npz')
>>> Npz().sniff(fname)
True
>>> fname = get_test_fname('interval.interval')
>>> Npz().sniff(fname)
False
"""
file_ext = "npz"
# edam_format = "format_4003"
MetadataElement(name="nfiles", default=0, desc="nfiles", readonly=True, visible=True, no_value=0)
MetadataElement(name="files", default=[], desc="files", readonly=True, visible=True, no_value=[])
def __init__(self, **kwd):
super().__init__(**kwd)
def sniff(self, filename):
try:
npz = np.load(filename)
if isinstance(npz, np.lib.npyio.NpzFile):
for f in npz.files:
if isinstance(npz[f], np.ndarray):
return True
except Exception:
return False
return False
def set_meta(self, dataset, **kwd):
try:
with np.load(dataset.file_name) as npz:
dataset.metadata.nfiles = len(npz.files)
dataset.metadata.files = npz.files
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = f"Binary Numpy npz {dataset.metadata.nfiles} files ({nice_size(dataset.get_size())})"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary Numpy npz file (%s)" % (nice_size(dataset.get_size()))
class HexrdImagesNpz(Npz):
"""
    Class describing a HEXRD Images NumPy NPZ file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.images.npz')
>>> HexrdImagesNpz().sniff(fname)
True
>>> fname = get_test_fname('eta_ome.npz')
>>> HexrdImagesNpz().sniff(fname)
False
"""
file_ext = "hexrd.images.npz"
MetadataElement(name="panel_id", default='', desc="Detector Panel ID", param=MetadataParameter, readonly=True, visible=True, optional=True, no_value='')
MetadataElement(name="shape", default=(), desc="shape", param=metadata.ListParameter, readonly=True, visible=True, no_value=())
MetadataElement(name="nframes", default=0, desc="nframes", readonly=True, visible=True, no_value=0)
MetadataElement(name="omegas", desc="has omegas", default="False", visible=False)
def __init__(self, **kwd):
super().__init__(**kwd)
def sniff(self, filename):
if super().sniff(filename):
try:
req_files = {'0_row', '0_col', '0_data', 'shape', 'nframes', 'dtype'}
with np.load(filename) as npz:
return set(npz.files) >= req_files
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
return False
def set_meta(self, dataset, **kwd):
super().set_meta(dataset, **kwd)
try:
with np.load(dataset.file_name) as npz:
if 'panel_id' in npz.files:
dataset.metadata.panel_id = str(npz['panel_id'])
if 'omega' in npz.files:
dataset.metadata.omegas = "True"
dataset.metadata.shape = npz['shape'].tolist()
dataset.metadata.nframes = npz['nframes'].tolist()
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = [f"Binary Hexrd Image npz {dataset.metadata.nfiles} files ({nice_size(dataset.get_size())})",
f"Panel: {dataset.metadata.panel_id} Frames: {dataset.metadata.nframes} Shape: {dataset.metadata.shape}"]
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary Numpy npz file (%s)" % (nice_size(dataset.get_size()))
class HexrdEtaOmeNpz(Npz):
"""
    Class describing a HEXRD Eta-Ome NumPy NPZ file
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('hexrd.eta_ome.npz')
>>> HexrdEtaOmeNpz().sniff(fname)
True
>>> fname = get_test_fname('hexrd.images.npz')
>>> HexrdEtaOmeNpz().sniff(fname)
False
"""
file_ext = "hexrd.eta_ome.npz"
MetadataElement(name="HKLs", default=(), desc="HKLs", param=metadata.ListParameter, readonly=True, visible=True, no_value=())
MetadataElement(name="nframes", default=0, desc="nframes", readonly=True, visible=True, no_value=0)
def __init__(self, **kwd):
super().__init__(**kwd)
def sniff(self, filename):
if super().sniff(filename):
try:
req_files = {'dataStore', 'etas', 'etaEdges', 'iHKLList', 'omegas', 'omeEdges', 'planeData_hkls'}
with np.load(filename) as npz:
return set(npz.files) >= req_files
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
return False
def set_meta(self, dataset, **kwd):
super().set_meta(dataset, **kwd)
try:
with np.load(dataset.file_name) as npz:
dataset.metadata.HKLs = npz['iHKLList'].tolist()
dataset.metadata.nframes = len(npz['omegas'])
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
lines = [f"Binary Hexrd Eta-Ome npz {dataset.metadata.nfiles} files ({nice_size(dataset.get_size())})",
f"Eta-Ome HKLs: {dataset.metadata.HKLs} Frames: {dataset.metadata.nframes}"]
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary Numpy npz file (%s)" % (nice_size(dataset.get_size()))
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules[__name__])
|
py | 1a49203a3f891164879998fbd52b1e74dbb24a86 | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.label import Label
from kivy.properties import BooleanProperty, ObjectProperty
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
from kivy.network.urlrequest import UrlRequest
import json
import os
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
RecycleBoxLayout):
pass
class SelectableLabel(RecycleDataViewBehavior, Label):
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
self.index = index
return super(SelectableLabel, self).refresh_view_attrs(
rv, index, data)
def on_touch_down(self, touch):
if super(SelectableLabel, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
self.selected = is_selected
class AddLocationForm(BoxLayout):
search_input = ObjectProperty()
search_results = ObjectProperty()
def search_location(self):
search_template = 'http://api.openweathermap.org/data/2.5/' + \
'find?q={}&type=like&appid={}'
key = os.environ.get('OpenWeatherKey')
search_url = search_template.format(self.search_input.text, key)
request = UrlRequest(search_url, self.found_location)
def found_location(self, request, data):
data = json.loads(data.decode()) if not isinstance(data, dict) \
else data
cities = [{'text': '{} ({}) - lat.:{} lon.:{}'.format(
d['name'],
d['sys']['country'],
d['coord']['lat'],
d['coord']['lon'])} for d in data['list']]
self.search_results.data = cities
class WeatherRoot(BoxLayout):
pass
class WeatherApp(App):
pass
if __name__ == '__main__':
WeatherApp().run()
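# Usage note (a sketch, not part of the original module): by Kivy convention the
# App subclass name implies a companion layout file -- assumed here to be
# ``weather.kv`` -- which defines WeatherRoot / AddLocationForm and binds the
# ``search_input`` and ``search_results`` ObjectProperty ids used above. The
# OpenWeatherMap key is read from the environment, so a typical run would be:
#
#     export OpenWeatherKey=<your-api-key>
#     python main.py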
|
py | 1a4920794b237b656befdbb3a6bfc0e55cd8ca47 | import os
import sfepy
from sfepy.base.base import load_classes, insert_static_method
from solvers import *
from eigen import eig
solver_files = sfepy.get_paths('sfepy/solvers/*.py')
remove = ['setup.py', 'solvers.py', 'petsc_worker.py']
solver_files = [name for name in solver_files
if os.path.basename(name) not in remove]
solver_table = load_classes(solver_files,
[LinearSolver, NonlinearSolver,
TimeSteppingSolver, EigenvalueSolver,
OptimizationSolver], package_name='sfepy.solvers')
def register_solver(cls):
"""
Register a custom solver.
"""
solver_table[cls.name] = cls
def any_from_conf(conf, **kwargs):
"""Create an instance of a solver class according to the configuration."""
return solver_table[conf.kind](conf, **kwargs)
insert_static_method(Solver, any_from_conf)
del any_from_conf
del sfepy
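# Usage sketch (illustrative only; ``MyCustomNewton`` and its ``name`` value are
# hypothetical, not part of sfepy): ``register_solver`` keys the class by its
# ``name`` attribute, and ``Solver.any_from_conf`` later resolves a problem
# configuration whose ``kind`` equals that name.
#
#     class MyCustomNewton(NonlinearSolver):
#         name = 'nls.my_custom_newton'
#
#     register_solver(MyCustomNewton)
#     # solver = Solver.any_from_conf(conf)  # when conf.kind == 'nls.my_custom_newton'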
|
py | 1a4920c56f0acf8aef82fd8a4fdf8d1c54f44abe | import datetime
import os
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives.serialization import Encoding
from .logger import logger
class StorageEngineCertificateConflict(Exception):
"""
Raise when a StorageEngine implementation is asked to persist a certificate
with a serial number that already exists or CommonName that is already in
use by another non-expired/revoked certificate
"""
class StorageEngineMissing(Exception):
"""
Raise when a StorageEngine type is missing.
"""
class UpdateCertException(Exception):
"""
Raise when attempting to update a cert and parameters are missing.
"""
class SqlStorageEngine:
"""
A Base SQL Storage Engine implementation.
"""
def close(self):
return self.conn.close()
class SQLiteStorageEngine(SqlStorageEngine):
"""
A StorageEngine implementation that persists data to a SQLite3 database
"""
def __init__(self, config):
import sqlite3
db_path = config.get(
"storage.sqlite3",
"db_path",
os.path.join(os.getcwd(), "mtls-server.db"),
)
self.conn = sqlite3.connect(db_path, check_same_thread=False)
def init_db(self):
cur = self.conn.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS certs (
serial_number text,
common_name text,
not_valid_after datetime,
cert blob,
revoked boolean,
fingerprint text
)
"""
)
self.conn.commit()
def save_cert(self, cert, fingerprint):
if self.__conflicting_cert_exists(cert, fingerprint):
raise StorageEngineCertificateConflict
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO certs (
serial_number,
common_name,
not_valid_after,
cert,
revoked,
fingerprint
)
VALUES (?, ?, ?, ?, ?, ?)
""",
[
str(cert.serial_number),
common_name,
cert.not_valid_after,
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
False,
fingerprint,
],
)
self.conn.commit()
def revoke_cert(self, serial_number):
cur = self.conn.cursor()
logger.info(
"Revoking certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"UPDATE certs SET revoked=1 WHERE serial_number=?",
[str(serial_number)],
)
self.conn.commit()
def update_cert(self, serial_number=None, cert=None):
if not serial_number or not cert:
logger.error("A serial number and cert are required to update.")
raise UpdateCertException
cur = self.conn.cursor()
logger.info(
"Updating certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"""
UPDATE
certs
SET
cert=?,
not_valid_after=?
WHERE
serial_number=?
""",
[
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
cert.not_valid_after,
str(serial_number),
],
)
self.conn.commit()
def get_cert(
self,
serial_number=None,
common_name=None,
fingerprint=None,
show_revoked=False,
):
cur = self.conn.cursor()
value = None
query = "SELECT cert FROM certs WHERE"
if serial_number is not None:
query += " serial_number=?"
value = str(serial_number)
elif fingerprint is not None:
query += " fingerprint=?"
value = str(fingerprint)
elif common_name is not None:
query += " common_name=?"
value = str(common_name)
else:
return None
if show_revoked:
query += " AND revoked=1"
else:
query += " AND revoked=0"
cur.execute(query, [str(value)])
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def get_revoked_certs(self):
cur = self.conn.cursor()
now = str(datetime.datetime.utcnow())
cur.execute(
"SELECT cert FROM certs WHERE revoked=1 AND not_valid_after>?",
[now],
)
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def __conflicting_cert_exists(self, cert, fingerprint):
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
SELECT count(*) FROM certs
WHERE serial_number=?
OR (
common_name=?
AND revoked=0
)
""",
[str(cert.serial_number), common_name],
)
conflicts = cur.fetchone()[0]
return conflicts > 0
class PostgresqlStorageEngine(SqlStorageEngine):
"""
A StorageEngine implementation that persists data to a Postgresql database
"""
def __init__(self, config):
import psycopg2
self.conn = psycopg2.connect(
dbname=config.get("storage.postgres", "database"),
user=config.get("storage.postgres", "user"),
password=config.get("storage.postgres", "password"),
host=config.get("storage.postgres", "host", "localhost"),
port=config.get_int("storage.postgres", "port", 5432),
)
def init_db(self):
cur = self.conn.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS certs (
serial_number text,
common_name text,
not_valid_after timestamp,
cert text,
revoked boolean,
fingerprint text
)
"""
)
self.conn.commit()
def save_cert(self, cert, fingerprint):
if self.__conflicting_cert_exists(cert, fingerprint):
raise StorageEngineCertificateConflict
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO certs (
serial_number,
common_name,
not_valid_after,
cert,
revoked,
fingerprint
)
VALUES (%s, %s, %s, %s, %s, %s)
""",
(
str(cert.serial_number),
common_name,
cert.not_valid_after,
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
False,
fingerprint,
),
)
self.conn.commit()
def get_cert(
self,
serial_number=None,
common_name=None,
fingerprint=None,
show_revoked=False,
):
cur = self.conn.cursor()
value = None
query = "SELECT cert FROM certs WHERE"
if serial_number is not None:
query += " serial_number = %s"
value = str(serial_number)
elif fingerprint is not None:
query += " fingerprint = %s"
value = fingerprint
elif common_name is not None:
query += " common_name = %s"
value = common_name
else:
return None
query += " AND revoked = %s"
cur.execute(query, (value, show_revoked))
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def revoke_cert(self, serial_number):
cur = self.conn.cursor()
logger.info(
"Revoking certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"UPDATE certs SET revoked=true WHERE serial_number = %s",
(str(serial_number),),
)
self.conn.commit()
def update_cert(self, serial_number=None, cert=None):
if not serial_number or not cert:
logger.error("A serial number and cert are required to update.")
raise UpdateCertException
cur = self.conn.cursor()
logger.info(
"Updating certificate {serial_number}".format(
serial_number=serial_number
)
)
cur.execute(
"""
UPDATE
certs
SET
cert = %s,
not_valid_after = %s
WHERE
serial_number = %s
""",
(
cert.public_bytes(Encoding.PEM).decode("UTF-8"),
cert.not_valid_after,
str(serial_number),
),
)
self.conn.commit()
def get_revoked_certs(self):
cur = self.conn.cursor()
now = datetime.datetime.utcnow()
not_valid_after = now.strftime("%Y-%m-%d %H:%M:%S")
cur.execute(
"SELECT cert FROM certs WHERE revoked = true AND "
+ "not_valid_after > %s",
(str(not_valid_after),),
)
rows = cur.fetchall()
certs = []
for row in rows:
certs.append(row[0])
return certs
def __conflicting_cert_exists(self, cert, fingerprint):
common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
common_name = common_name[0].value
cur = self.conn.cursor()
cur.execute(
"""
SELECT count(*) FROM certs
WHERE serial_number = %s
OR (
common_name = %s
AND fingerprint = %s
AND revoked=false
)
""",
(str(cert.serial_number), common_name, fingerprint),
)
conflicts = cur.fetchone()[0]
return conflicts > 0
class StorageEngineNotSupportedError(Exception):
"""
Raise when a StorageEngine implementation cannot be created from the
provided configuration
"""
class StorageEngine:
"""
StorageEngine is a factory that returns a concrete engine implementation
depending on the configuration
"""
def __new__(cls, config):
engine = config.get("storage", "engine", None)
if engine is None:
raise StorageEngineMissing()
if engine == "sqlite3":
return SQLiteStorageEngine(config)
elif engine == "postgres":
return PostgresqlStorageEngine(config)
else:
raise StorageEngineNotSupportedError(engine)
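# Usage sketch (assumes a ``config`` object exposing the same
# ``get(section, option, default=None)`` / ``get_int`` interface used above; the
# concrete Config class is defined outside this module):
#
#     engine = StorageEngine(config)  # SQLiteStorageEngine or PostgresqlStorageEngine
#     engine.init_db()
#     engine.save_cert(cert, fingerprint)
#     revoked = engine.get_revoked_certs()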
|
py | 1a4921cb99344635bab852cd9f36084089a177d8 | import datetime
from docxtpl import DocxTemplate, InlineImage
from docx.shared import Cm
def get_context(brand, model, fuel_consumption, price):
return {
'brand': brand,
'model': model,
'fuel_consumption': fuel_consumption,
'price': price
}
def from_template(brand, model, fuel_consumption, price, template, signature):
template = DocxTemplate(template)
context = get_context(brand, model, fuel_consumption, price)
    # Set the image parameters
img_size = Cm(15)
acc = InlineImage(template, signature, img_size)
    # Populate the template with the data being passed in
context['acc'] = acc
template.render(context)
    # Save the resulting document with the filled-in information
template.save(brand + '_' + str(datetime.datetime.now().date()) + '_data.docx')
def generate_report(brand, model, fuel_consumption, price):
template = 'report.docx'
signature = 'skoda.jpeg'
document = from_template(brand, model, fuel_consumption, price, template, signature)
def to_fixed(num_obj, digits=0):
return f"{num_obj:.{digits}f}"
generate_report('Skoda', 'Octavia', '9 l/100 km', '1 500 000 RUB')
|