ext | sha | content |
---|---|---|
py | 1a34cb55f902b61874eb0d738dcf9b8848b52380 |
# -*- coding: utf-8 -*-
'''
File name: code\chef_showdown\sol_481.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #481 :: Chef Showdown
#
# For more information see:
# https://projecteuler.net/problem=481
# Problem Statement
'''
A group of chefs (numbered #1, #2, etc) participate in a turn-based strategic cooking competition. On each chef's turn, he/she cooks up a dish to the best of his/her ability and gives it to a separate panel of judges for taste-testing. Let S(k) represent chef #k's skill level (which is publicly known). More specifically, S(k) is the probability that chef #k's dish will be assessed favorably by the judges (on any/all turns). If the dish receives a favorable rating, then the chef must choose one other chef to be eliminated from the competition. The last chef remaining in the competition is the winner.
The game always begins with chef #1, with the turn order iterating sequentially over the rest of the chefs still in play. Then the cycle repeats from the lowest-numbered chef. All chefs aim to optimize their chances of winning within the rules as stated, assuming that the other chefs behave in the same manner. In the event that a chef has more than one equally-optimal elimination choice, assume that the chosen chef is always the one with the next-closest turn.
Define W_n(k) as the probability that chef #k wins in a competition with n chefs. If we have S(1) = 0.25, S(2) = 0.5, and S(3) = 1, then W_3(1) = 0.29375.
Going forward, we assign S(k) = F_k / F_{n+1} over all 1 ≤ k ≤ n, where F_k is a Fibonacci number: F_k = F_{k-1} + F_{k-2} with base cases F_1 = F_2 = 1. Then, for example, when considering a competition with n = 7 chefs, we have W_7(1) = 0.08965042, W_7(2) = 0.20775702, W_7(3) = 0.15291406, W_7(4) = 0.14554098, W_7(5) = 0.15905291, W_7(6) = 0.10261412, and W_7(7) = 0.14247050, rounded to 8 decimal places each.
Let E(n) represent the expected number of dishes cooked in a competition with n chefs. For instance, E(7) = 42.28176050.
Find E(14) rounded to 8 decimal places.
'''
# Solution
# Solution Approach
'''
'''
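# Note: the solution body above is intentionally left empty in this file. As a
# small illustrative sketch only (not the author's solution), the snippet below
# shows how the skill levels S(k) = F_k / F_{n+1} from the problem statement can
# be computed exactly with fractions; the game-theoretic evaluation of E(n)
# itself is not attempted here.
from fractions import Fraction


def skill_levels(n):
    """Return [S(1), ..., S(n)] where S(k) = F_k / F_{n+1} and F_1 = F_2 = 1."""
    fib = [1, 1]  # fib[i] holds F_{i+1}
    while len(fib) < n + 1:  # need Fibonacci numbers up to F_{n+1}
        fib.append(fib[-1] + fib[-2])
    return [Fraction(fib[k - 1], fib[n]) for k in range(1, n + 1)]


# Example: for n = 7, skill_levels(7)[0] == Fraction(1, 21), since F_1 = 1 and F_8 = 21.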
|
py | 1a34cc2ae79975a236af2dde49b9a87ff697dde6 | '''
Created on Nov 7, 2016
@author: jack
'''
from string import ascii_lowercase
from Loader import load, imageToWeights
import MultivariateRegression
import copy
from PIL import Image
import PIL
# return a map of letter -> list of training vectors
def createTrainingVectors(letters):
trainingVectors = {} # one list of vectors per letter
for letter, images in letters.items(): #initialize list
trainingVectors[letter] = []
for letter, images in letters.items():
for image in images:
for currentLetter in ascii_lowercase:
weights = imageToWeights(image)
output = 0.0
if letter == currentLetter:
output = 1.0
currentvector = MultivariateRegression.TrainingVector(weights, output)
trainingVectors[currentLetter].append(copy.deepcopy(currentvector))
return trainingVectors
def createRegressions():
regressions = {}
with open('log.txt', 'r') as logfile:
for line in logfile:
character = line[:line.index(':')]
weightText = line[line.index(':')+1:]
weightsStr = weightText.split(' ')
weights = [float(x) for x in weightsStr if len(x) > 0 and x != "\n"]
regressions[character] = MultivariateRegression.MultivariateRegression(None, defweight=weights)
missing = [char for char in ascii_lowercase if (char not in regressions.keys())]
# if the list isn't empty
if missing:
letters = load()
trainers = createTrainingVectors(letters)
for character in missing:
regressions[character] = MultivariateRegression.MultivariateRegression(trainers[character])
with open('log.txt', 'w') as logfile:
for character, regression in regressions.items():
logfile.write(character + ":")
for weight in regression.weights:
logfile.write(" " + str(weight))
logfile.write("\n")
return regressions
def thumb(source):
source.thumbnail((5, 7), PIL.Image.ANTIALIAS)
normSizeImage = Image.new("L", (5,7))
normSizeImage.paste(source, (0, 0, source.width, source.height))
assert normSizeImage.width == 5 and normSizeImage.height == 7
normSizeImage.save("thumb.png")
return normSizeImage
if __name__ == '__main__':
regressions = createRegressions()
#regression = MultivariateRegression.MultivariateRegression(None, defweight=[0.4980739007472371,0.034294627828366626,0.08527965194087288,0.1611870785960047,0.20510706494725237,0.1647477728264825,0.0992302295864054,0.04132866244579191,0.03296886126530924,0.07010771926676687,0.10676335664100603,0.11058294826171326,0.09605828600570894,0.07552970677772958,0.03219182081913526,0.02124397446868094,0.03749483934979752,0.055892351803555826,0.058883158424758206,0.04569714919406262,0.029699170400632097,0.020281550749062675,0.012887985299360796,0.017883024180103586,0.023729694350087253,0.024766788815806413,0.02029564278452003,0.014526331358683814,0.0109255050331542,0.009790039260313185,0.011786566800573337,0.014167221227157203,0.013808719001855726,0.011920327630028092,0.011314754849456685,0.009319095256870028])
for letter in ascii_lowercase:
regression = regressions[letter]
print(letter)
im = Image.open("sample.jpg")
weights = [1] + imageToWeights(im)
print(regression.predict(weights))
im = Image.open("letter-v.jpg")
weights = [1] + imageToWeights(im)
print(regression.predict(weights))
|
py | 1a34cc64c2b84f7af3890c31f1376edfeec05b77 | """
sentry.middleware.maintenance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.conf import settings
from django.http import HttpResponse
logger = logging.getLogger('sentry.errors')
DB_ERRORS = []
try:
import MySQLdb
except ImportError:
pass
else:
DB_ERRORS.append(MySQLdb.OperationalError)
try:
import psycopg2
except ImportError:
pass
else:
DB_ERRORS.append(psycopg2.OperationalError)
DB_ERRORS = tuple(DB_ERRORS)
class ServicesUnavailableMiddleware(object):
def process_request(self, request):
if settings.MAINTENANCE:
return HttpResponse('Sentry is currently in maintenance mode', status=503)
def process_exception(self, request, exception):
if isinstance(exception, DB_ERRORS):
logger.exception('Fatal error returned from database')
return HttpResponse('Sentry is currently in maintenance mode', status=503)
|
py | 1a34cc702eaf2ce371da1b3f3e91174b6653a1ff | from django.conf.urls import url
from . import views
app_name = 'twister'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^twist/$', views.TwistView.as_view(), name='twist'),
url(r'^domain/(?P<pk>.+)/$', views.DomainView.as_view(), name='domain'),
]
|
py | 1a34cdd2b7c354b906ec5cd86c96e5ac441f7da2 | print('test it test it')
|
py | 1a34ce4dde22d92fe6c3af013cef78562e8f39d1 | from typing import List, NamedTuple, Tuple, Union
import geopandas as gpd
import gmsh
import numpy as np
import pandas as pd
import shapely.geometry as sg
from .common import FloatArray, IntArray, coord_dtype, flatten, separate
Z_DEFAULT = 0.0
POINT_DIM = 0
LINE_DIM = 1
PLANE_DIM = 2
class PolygonInfo(NamedTuple):
index: int
size: int
interior_indices: List[int]
interior_sizes: List[int]
polygon_id: int
class LineStringInfo(NamedTuple):
index: int
size: int
embedded_in: Union[int, None]
def polygon_info(
polygon: sg.Polygon, cellsize: float, index: int, polygon_id: int
) -> Tuple[PolygonInfo, FloatArray, FloatArray, int]:
exterior_coords = np.array(polygon.exterior.coords)[:-1]
size = len(exterior_coords)
vertices = [exterior_coords]
cellsizes = [np.full(size, cellsize)]
info = PolygonInfo(index, size, [], [], polygon_id)
index += size
for interior in polygon.interiors:
interior_coords = np.array(interior.coords)[:-1]
vertices.append(interior_coords)
size = len(interior_coords)
cellsizes.append(np.full(size, cellsize))
info.interior_indices.append(index)
info.interior_sizes.append(size)
index += size
return info, vertices, cellsizes, index
def linestring_info(
linestring: sg.LineString, cellsize: float, index: int, inside: Union[int, None]
) -> Tuple[LineStringInfo, FloatArray, FloatArray, int]:
vertices = np.array(linestring.coords)
size = len(vertices)
cellsizes = np.full(size, cellsize)
info = LineStringInfo(index, size, inside)
index += size
return info, vertices, cellsizes, index
def add_vertices(vertices, cellsizes, tags) -> None:
for (x, y), cellsize, tag in zip(vertices, cellsizes, tags):
gmsh.model.geo.addPoint(x, y, Z_DEFAULT, cellsize, tag)
def add_linestrings(
features: List[LineStringInfo], tags: IntArray
) -> Tuple[IntArray, IntArray]:
n_lines = sum(info.size - 1 for info in features)
line_indices = np.empty(n_lines, dtype=np.int64)
embedded_in = np.empty(n_lines, dtype=np.int64)
i = 0
for info in features:
point_tags = tags[info.index : info.index + info.size]
first = point_tags[0]
for second in point_tags[1:]:
line_index = gmsh.model.geo.addLine(first, second)
line_indices[i] = line_index
embedded_in[i] = info.embedded_in
first = second
i += 1
return line_indices, embedded_in
def add_curve_loop(point_tags: FloatArray) -> int:
tags = []
first = point_tags[-1]
for second in point_tags:
line_tag = gmsh.model.geo.addLine(first, second)
tags.append(line_tag)
first = second
curve_loop_tag = gmsh.model.geo.addCurveLoop(tags)
return curve_loop_tag
def add_polygons(
features: List[PolygonInfo], tags: IntArray
) -> Tuple[List[int], List[int]]:
plane_tags = []
for info in features:
# Add the exterior loop first
curve_loop_tags = [add_curve_loop(tags[info.index : info.index + info.size])]
# Now add holes
for start, size in zip(info.interior_indices, info.interior_sizes):
loop_tag = add_curve_loop(tags[start : start + size])
curve_loop_tags.append(loop_tag)
plane_tag = gmsh.model.geo.addPlaneSurface(curve_loop_tags, tag=info.polygon_id)
plane_tags.append(plane_tag)
return curve_loop_tags, plane_tags
def add_points(points: gpd.GeoDataFrame) -> Tuple[IntArray, IntArray]:
n_points = len(points)
indices = np.empty(n_points, dtype=np.int64)
embedded_in = points["__polygon_id"].values
# We have to add points one by one due to the Gmsh addPoint API
for i, row in enumerate(points.to_dict("records")):
point = row["geometry"]
        # Rely on gmsh's automatic numbering to generate the indices
point_index = gmsh.model.geo.addPoint(
point.x, point.y, Z_DEFAULT, row["cellsize"]
)
indices[i] = point_index
return indices, embedded_in
def collect_polygons(
polygons: gpd.GeoDataFrame, index: int
) -> Tuple[int, FloatArray, IntArray, List[PolygonInfo]]:
vertices = []
cellsizes = []
features = []
for row in polygons.to_dict("records"):
info, coords, cells, index = polygon_info(
row["geometry"], row["cellsize"], index, row["__polygon_id"]
)
vertices.extend(coords)
cellsizes.extend(cells)
features.append(info)
return index, vertices, cellsizes, features
def collect_linestrings(
linestrings: gpd.GeoDataFrame, index: int
) -> Tuple[int, FloatArray, IntArray, List[LineStringInfo]]:
vertices = []
cellsizes = []
features = []
for row in linestrings.to_dict("records"):
info, coords, cells, index = linestring_info(
row["geometry"], row["cellsize"], index, row["__polygon_id"]
)
vertices.append(coords)
cellsizes.append(cells)
features.append(info)
return index, vertices, cellsizes, features
def collect_points(points: gpd.GeoDataFrame) -> FloatArray:
return np.stack((points["geometry"].x, points["geometry"].y), axis=1)
def embed_where(gdf: gpd.GeoDataFrame, polygons: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
tmp = gpd.sjoin(gdf, polygons, predicate="within", how="inner")
tmp["cellsize"] = tmp[["cellsize_left", "cellsize_right"]].min(axis=1)
return tmp[["cellsize", "__polygon_id", "geometry"]]
def add_geometry(
polygons: gpd.GeoDataFrame, linestrings: gpd.GeoDataFrame, points: gpd.GeoDataFrame
):
# Assign unique ids
polygons["__polygon_id"] = np.arange(1, len(polygons) + 1)
# Figure out in which polygon the points and linestrings will be embedded.
linestrings = embed_where(linestrings, polygons)
embedded_points = embed_where(points, polygons)
# Collect all coordinates, and store the length and type of every element
index, poly_vertices, poly_cellsizes, polygon_features = collect_polygons(
polygons, index=0
)
index, line_vertices, line_cellsizes, linestring_features = collect_linestrings(
linestrings, index
)
vertices = np.concatenate(poly_vertices + line_vertices)
cellsizes = np.concatenate(poly_cellsizes + line_cellsizes)
# Get the unique vertices, and generate the array of indices pointing to
# them for every feature
vertices, indices = np.unique(
vertices.reshape(-1).view(coord_dtype), return_inverse=True
)
vertex_tags = np.arange(1, len(vertices) + 1)
tags = vertex_tags[indices]
# Get the smallest cellsize per vertex
cellsizes = pd.Series(cellsizes).groupby(tags).min().values
# Add all unique vertices. This includes vertices for linestrings and polygons.
add_vertices(vertices, cellsizes, vertex_tags)
# Add all geometries to gmsh
add_polygons(polygon_features, tags)
linestring_indices, linestring_embedded = add_linestrings(linestring_features, tags)
gmsh.model.geo.synchronize()
# Now embed the points and linestrings in the polygons
for polygon_id, embed_indices in pd.Series(linestring_indices).groupby(
linestring_embedded
):
gmsh.model.mesh.embed(LINE_DIM, embed_indices, PLANE_DIM, polygon_id)
if len(embedded_points) > 0:
point_indices, point_embedded = add_points(embedded_points)
gmsh.model.geo.synchronize()
for polygon_id, embed_indices in pd.Series(point_indices).groupby(
point_embedded
):
gmsh.model.mesh.embed(POINT_DIM, embed_indices, PLANE_DIM, polygon_id)
gmsh.model.geo.synchronize()
def add_field_points(points: gpd.GeoSeries) -> IntArray:
indices = np.empty(len(points), dtype=np.int64)
xy_coords = np.stack((points.x, points.y), axis=1)
for i, (x, y) in enumerate(xy_coords):
indices[i] = gmsh.model.geo.addPoint(x, y, Z_DEFAULT)
return indices
def add_field_linestring(
linestring: sg.LineString, minimum_cellsize: float
) -> IntArray:
n_vertices = int(np.ceil(linestring.length / minimum_cellsize))
indices = np.empty(n_vertices, dtype=np.int64)
for i, distance in enumerate(np.linspace(0.0, linestring.length, n_vertices)):
point = linestring.interpolate(distance)
indices[i] = gmsh.model.geo.addPoint(point.x, point.y, Z_DEFAULT)
return indices
def add_field_linestrings(
linestrings: gpd.GeoSeries, minimum_cellsize: float
) -> IntArray:
indices = [
add_field_linestring(linestring, minimum_cellsize) for linestring in linestrings
]
return np.concatenate(indices)
def add_field_polygons(polygons: gpd.GeoSeries, minimum_cellsize: float) -> IntArray:
indices = []
for exterior in polygons.exteriors:
indices.append(add_field_linestring(exterior, minimum_cellsize))
for interior in flatten(polygons.interiors):
indices.append(add_field_linestring(interior, minimum_cellsize))
return np.concatenate(indices)
def add_field_geometry(geometry: gpd.GeoSeries, minimum_cellsize: float) -> IntArray:
polygons, linestrings, points = separate(geometry)
point_nodes = add_field_points(points)
linestring_nodes = add_field_linestrings(linestrings, minimum_cellsize)
polygon_nodes = add_field_polygons(polygons, minimum_cellsize)
return np.concatenate((point_nodes, linestring_nodes, polygon_nodes))
|
py | 1a34d0029e0dc2197f2851ac179ba63891993965 | """
Download ACS data and parse for uploading
"""
import os.path
import json
import grequests
import pandas as pd
from ntd import update_dollars
from carto import replace_data
import settings
def process_result(i, y, var, indexes, frames):
"""Transform downloaded result to data frame by year"""
result = pd.DataFrame(i.json())
result.columns = result.iloc[0]
result = result.reindex(result.index.drop(0))
if 'metropolitan statistical area/micropolitan statistical area' in result.columns:
result.rename(
columns={'metropolitan statistical area/micropolitan statistical area':'GEOID'},
inplace=True
)
else:
result['GEOID'] = pd.Series(
result['state'] + result['county'] + result['tract']
).astype(str)
result['year'] = y
out = result.set_index(indexes)
df = out.groupby(level=out.index.names).last()
data = pd.to_numeric(df[var])
frames.append(data[data >= 0])
return frames
def combine_files(frame, geo, cols, index):
"""Merge downloaded ACS data with geo file"""
return pd.concat(frame, axis=1).reset_index().merge(
geo, on='GEOID', how='inner'
).drop(
columns=cols
).dropna(
subset=['pop']
).set_index(index)
def download_census():
"""Download and parse ACS data as defined in acs.json"""
geo = pd.read_csv('data/geojson/tracts/cbsa_crosswalk.csv', dtype={'GEOID': object})
counties = geo.drop_duplicates(
['STATEFP', 'COUNTYFP', 'msaid']
)
counties = counties.groupby('STATEFP')[['STATEFP', 'COUNTYFP', 'msaid']].apply(
lambda x: x.set_index('COUNTYFP').to_dict(orient='index')
).to_dict()
msa_geo = pd.read_csv('data/geojson/cbsa/cb_2017_us_cbsa_500k.csv', dtype={'GEOID': object})
indexes = ['GEOID', 'year']
msa_indexes = ['GEOID', 'year']
with open('data/census/acs.json', "r") as read_file:
meta = json.load(read_file)
acs = []
msa = []
for r in meta:
if 'var' in r:
filename = 'data/output/census/' + r['key'] + '.csv'
if os.path.isfile(filename):
csv = pd.read_csv(
filename, dtype={'GEOID': object}
).set_index(indexes)
df = csv.groupby(level=csv.index.names).last()
acs.append(df)
elif r['var'] != '99999999':
frames = []
errors = []
for y in range(2010, 2019):
for s in counties:
urls = errors
errors = []
for c in counties[s]:
urls.append('https://api.census.gov/data/' + str(y) + \
'/acs/acs5?get=' + r['var'] + '&for=tract:*&in=state:' + \
str(s).zfill(2) + '%20county:' + str(c).zfill(3) + '&key=' + \
settings.CENSUS_API)
rs = (grequests.get(u) for u in urls)
res = grequests.map(rs)
for i in res:
try:
frames = process_result(i, y, r['var'], indexes, frames)
except:
try:
print(i.text)
errors.append(i.url)
except:
pass
print('Loaded:', r['name'], y, s)
print('-----')
if errors:
print('Retrying', len(errors), 'errors')
ind = pd.Series(pd.concat(frames), name=r['key'])
ind.to_csv(filename, header=r['key'])
acs.append(ind)
filename = 'data/output/msa/' + r['key'] + '.csv'
if os.path.isfile(filename):
csv = pd.read_csv(
filename, dtype={'GEOID': object}
).set_index(indexes)
df = csv.groupby(level=csv.index.names).last()
msa.append(df)
elif r['var'] != '99999999':
frames = []
for y in range(2010, 2019):
urls = ['https://api.census.gov/data/' + str(y) + \
'/acs/acs5?get=' + r['var'] + \
'&for=metropolitan statistical area/micropolitan statistical area:*' + \
'&key=' + settings.CENSUS_API]
rs = (grequests.get(u) for u in urls)
res = grequests.map(rs)
frames = process_result(res[0], y, r['var'], indexes, frames)
ind = pd.Series(pd.concat(frames), name=r['key'])
ind.to_csv(filename, header=r['key'])
msa.append(ind)
combined = combine_files(
acs,
geo,
['STATEFP', 'COUNTYFP', 'TRACTCE', 'AFFGEOID', 'NAME', 'AWATER', 'LSAD', 'CBSAFP'],
indexes
)
msa_combo = combine_files(
msa,
msa_geo,
['AFFGEOID', 'NAME', 'AWATER', 'LSAD', 'CBSAFP'],
msa_indexes
)
for d in meta:
print(d['name'])
if 'upload' in d and d['upload']:
indexes.append(d['key'])
if 'msa' in d and d['msa']:
msa_indexes.append(d['key'])
if 'inflation' in d:
combined[d['key']] = update_dollars(pd.Series(combined[d['key']], name=d['key']))
msa_combo[d['key']] = update_dollars(pd.Series(msa_combo[d['key']], name=d['key']))
if 'var' not in d:
if 'sum' in d:
combined[d['key']] = 0
if 'msa' in d and d['msa']:
msa_combo[d['key']] = 0
for s in d['sum']:
combined[d['key']] = combined[d['key']] + combined[s]
if 'msa' in d and d['msa']:
msa_combo[d['key']] = msa_combo[d['key']] + msa_combo[s]
else:
combined[d['key']] = combined[d['numerator']].divide(
combined[d['denominator']],
fill_value=0
)
if 'msa' in d and d['msa']:
msa_combo[d['key']] = msa_combo[d['numerator']].divide(
msa_combo[d['denominator']],
fill_value=0
)
if 'scale' in d:
combined[d['key']] = combined[d['key']] * d['scale']
if 'msa' in d and d['msa']:
msa_combo[d['key']] = msa_combo[d['key']] * d['scale']
export_msa = msa_combo.reset_index()
export_msa_filtered = export_msa[
export_msa.GEOID.isin([str(i) for i in combined.msaid.unique().tolist()])
]
export_msa_filtered[msa_indexes].astype({'pop': 'int32'}).to_csv(
'data/output/census_msa.csv', index=False
)
replace_data('census_msa', msa_indexes, 'census_msa.csv')
indexes.append('msaid')
export = combined.reset_index()
export[indexes].to_csv('data/output/census.csv', index=False)
replace_data('census', indexes, 'census.csv')
if __name__ == "__main__":
download_census()
|
py | 1a34d0b1aeb9a16913ce521b63435b149c8ddbae | import warnings
from collections import namedtuple
from contextlib import suppress
import boto3
from botocore.exceptions import ClientError
from dagster import Array, Field, Noneable, ScalarUnion, StringSource, check
from dagster.core.events import EngineEventData, MetadataEntry
from dagster.core.launcher.base import LaunchRunContext, RunLauncher
from dagster.grpc.types import ExecuteRunArgs
from dagster.serdes import ConfigurableClass
from dagster.utils import merge_dicts
from ..secretsmanager import get_secrets_from_arns, get_tagged_secrets
from .tasks import default_ecs_task_definition, default_ecs_task_metadata
from .utils import sanitize_family
Tags = namedtuple("Tags", ["arn", "cluster", "cpu", "memory"])
class EcsRunLauncher(RunLauncher, ConfigurableClass):
"""RunLauncher that starts a task in ECS for each Dagster job run."""
def __init__(
self,
inst_data=None,
task_definition=None,
container_name="run",
secrets=None,
secrets_tag="dagster",
include_sidecars=False,
):
self._inst_data = inst_data
self.ecs = boto3.client("ecs")
self.ec2 = boto3.resource("ec2")
self.secrets_manager = boto3.client("secretsmanager")
self.task_definition = task_definition
self.container_name = container_name
self.secrets = secrets or []
if all(isinstance(secret, str) for secret in self.secrets):
warnings.warn(
"Setting secrets as a list of ARNs is deprecated. "
"Secrets should instead follow the same structure as the ECS API: "
"https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html",
DeprecationWarning,
)
self.secrets = get_secrets_from_arns(self.secrets_manager, self.secrets)
else:
self.secrets = {secret["name"]: secret["valueFrom"] for secret in self.secrets}
self.secrets_tag = secrets_tag
self.include_sidecars = include_sidecars
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=task_definition)
container_names = [
container.get("name")
for container in task_definition["taskDefinition"]["containerDefinitions"]
]
check.invariant(
container_name in container_names,
f"Cannot override container '{container_name}' in task definition "
f"'{self.task_definition}' because the container is not defined.",
)
self.task_definition = task_definition["taskDefinition"]["taskDefinitionArn"]
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {
"task_definition": Field(
StringSource,
is_required=False,
description=(
"The task definition to use when launching new tasks. "
"If none is provided, each run will create its own task "
"definition."
),
),
"container_name": Field(
StringSource,
is_required=False,
default_value="run",
description=(
"The container name to use when launching new tasks. Defaults to 'run'."
),
),
"secrets": Field(
Array(
ScalarUnion(
scalar_type=str,
non_scalar_schema={"name": StringSource, "valueFrom": StringSource},
)
),
is_required=False,
description=(
"An array of AWS Secrets Manager secrets. These secrets will "
"be mounted as environment variabls in the container. See "
"https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html."
),
),
"secrets_tag": Field(
Noneable(StringSource),
is_required=False,
default_value="dagster",
description=(
"AWS Secrets Manager secrets with this tag will be mounted as "
"environment variables in the container. Defaults to 'dagster'."
),
),
"include_sidecars": Field(
bool,
is_required=False,
default_value=False,
description=(
"Whether each run should use the same sidecars as the task that launches it. "
"Defaults to False."
),
),
}
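    # As an illustrative sketch only (not part of this module): with the config
    # schema above, a Dagster instance would typically select this launcher in
    # its dagster.yaml roughly as in the commented example below; the module/class
    # path and the values shown are assumptions for illustration.
    #
    #   run_launcher:
    #     module: dagster_aws.ecs
    #     class: EcsRunLauncher
    #     config:
    #       container_name: "run"
    #       secrets_tag: "dagster"
    #       include_sidecars: false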
@staticmethod
def from_config_value(inst_data, config_value):
return EcsRunLauncher(inst_data=inst_data, **config_value)
def _set_ecs_tags(self, run_id, task_arn):
try:
tags = [{"key": "dagster/run_id", "value": run_id}]
self.ecs.tag_resource(resourceArn=task_arn, tags=tags)
except ClientError:
pass
def _set_run_tags(self, run_id, task_arn):
cluster = self._task_metadata().cluster
tags = {"ecs/task_arn": task_arn, "ecs/cluster": cluster}
self._instance.add_run_tags(run_id, tags)
def _get_run_tags(self, run_id):
run = self._instance.get_run_by_id(run_id)
tags = run.tags if run else {}
arn = tags.get("ecs/task_arn")
cluster = tags.get("ecs/cluster")
cpu = tags.get("ecs/cpu")
memory = tags.get("ecs/memory")
return Tags(arn, cluster, cpu, memory)
def launch_run(self, context: LaunchRunContext) -> None:
"""
Launch a run in an ECS task.
Currently, Fargate is the only supported launchType and awsvpc is the
only supported networkMode. These are the defaults that are set up by
docker-compose when you use the Dagster ECS reference deployment.
"""
run = context.pipeline_run
family = sanitize_family(
run.external_pipeline_origin.external_repository_origin.repository_location_origin.location_name
)
metadata = self._task_metadata()
pipeline_origin = context.pipeline_code_origin
image = pipeline_origin.repository_origin.container_image
task_definition = self._task_definition(family, metadata, image)["family"]
args = ExecuteRunArgs(
pipeline_origin=pipeline_origin,
pipeline_run_id=run.run_id,
instance_ref=self._instance.get_ref(),
)
command = args.get_command_args()
# Set cpu or memory overrides
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html
cpu_and_memory_overrides = {}
tags = self._get_run_tags(run.run_id)
if tags.cpu:
cpu_and_memory_overrides["cpu"] = tags.cpu
if tags.memory:
cpu_and_memory_overrides["memory"] = tags.memory
        # Run a task using the same network configuration as this process's
        # task.
response = self.ecs.run_task(
taskDefinition=task_definition,
cluster=metadata.cluster,
overrides={
"containerOverrides": [
{
"name": self.container_name,
"command": command,
# containerOverrides expects cpu/memory as integers
**{k: int(v) for k, v in cpu_and_memory_overrides.items()},
}
],
# taskOverrides expects cpu/memory as strings
**cpu_and_memory_overrides,
},
networkConfiguration={
"awsvpcConfiguration": {
"subnets": metadata.subnets,
"assignPublicIp": metadata.assign_public_ip,
"securityGroups": metadata.security_groups,
}
},
launchType="FARGATE",
)
tasks = response["tasks"]
if not tasks:
failures = response["failures"]
exceptions = []
for failure in failures:
arn = failure.get("arn")
reason = failure.get("reason")
detail = failure.get("detail")
exceptions.append(Exception(f"Task {arn} failed because {reason}: {detail}"))
raise Exception(exceptions)
arn = tasks[0]["taskArn"]
self._set_run_tags(run.run_id, task_arn=arn)
self._set_ecs_tags(run.run_id, task_arn=arn)
self._instance.report_engine_event(
message="Launching run in ECS task",
pipeline_run=run,
engine_event_data=EngineEventData(
[
MetadataEntry("ECS Task ARN", value=arn),
MetadataEntry("ECS Cluster", value=metadata.cluster),
MetadataEntry("Run ID", value=run.run_id),
]
),
cls=self.__class__,
)
def can_terminate(self, run_id):
tags = self._get_run_tags(run_id)
if not (tags.arn and tags.cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status and status != "STOPPED":
return True
return False
def terminate(self, run_id):
tags = self._get_run_tags(run_id)
if not (tags.arn and tags.cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status == "STOPPED":
return False
self.ecs.stop_task(task=tags.arn, cluster=tags.cluster)
return True
def _task_definition(self, family, metadata, image):
"""
Return the launcher's task definition if it's configured.
Otherwise, a new task definition revision is registered for every run.
First, the process that calls this method finds its own task
definition. Next, it creates a new task definition based on its own
but it overrides the image with the pipeline origin's image.
"""
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=self.task_definition)
return task_definition["taskDefinition"]
secrets = merge_dicts(
(
get_tagged_secrets(self.secrets_manager, self.secrets_tag)
if self.secrets_tag
else {}
),
self.secrets,
)
secrets_dict = (
{"secrets": [{"name": key, "valueFrom": value} for key, value in secrets.items()]}
if secrets
else {}
)
task_definition = {}
with suppress(ClientError):
task_definition = self.ecs.describe_task_definition(taskDefinition=family)[
"taskDefinition"
]
container_definitions = task_definition.get("containerDefinitions", [{}])
for container_definition in container_definitions:
if (
container_definition.get("image") == image
and container_definition.get("name") == self.container_name
and container_definition.get("secrets") == secrets_dict.get("secrets", [])
):
return task_definition
return default_ecs_task_definition(
self.ecs,
family,
metadata,
image,
self.container_name,
secrets=secrets_dict,
include_sidecars=self.include_sidecars,
)
def _task_metadata(self):
return default_ecs_task_metadata(self.ec2, self.ecs)
|
py | 1a34d0bebf9cc53af63f34700842fd16f6867e2f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Provides endpoint and web page for simple search API."""
import os
import json
from typing import Iterable, Iterator
from flask import Flask, render_template, abort, jsonify
from webargs.flaskparser import use_kwargs
from webargs import fields
FIELD_NAMES = ['job_history', 'company', 'email', 'city', 'country', 'name']
app = Flask(__name__)
def must_match_field_name(value):
return value in FIELD_NAMES
def prepare_data(data: Iterable[dict]) -> Iterator[dict]:
"""Make job_history list comma delimited for ease of processing/display.
"""
for datum in data:
datum['job_history'] = ', '.join(datum['job_history'])
yield datum
def filtered_results(data: Iterable[dict],
query: str,
field: str) -> Iterator[dict]:
if not query:
yield from data
return
for datum in data:
if field:
# Case-insensitive for simplicity
if query.lower() in datum[field].lower():
yield datum
else:
for field_name in FIELD_NAMES:
if query.lower() in datum[field_name].lower():
yield datum
break
@app.route("/", methods=['get'])
def search():
return render_template('search.html')
@app.route("/search", methods=['get'])
@use_kwargs({
'query': fields.Str(missing=None),
'field': fields.Str(missing=None, validate=must_match_field_name),
'size': fields.Int(missing=20),
'offset': fields.Int(missing=0)
}, location="query")
def search_api(query, field, size, offset):
# File used in this example instead of further API call
# or database connection
json_path = os.path.join(app.root_path,
'static/json',
'mock-contacts.json')
data = json.load(open(json_path))
prepped_data = prepare_data(data)
results = list(filtered_results(prepped_data, query, field))
index_start = size * offset
if index_start > len(results):
abort(400)
index_stop = min(size + (size * offset), len(results))
body = {
'results': results[index_start:index_stop],
'total': len(results)
}
return jsonify(body)
@app.route("/rowchanged", methods=['post'])
@use_kwargs({
'rowindex': fields.Int(missing=None),
'oldvalue': fields.Str(missing=None),
'newvalue': fields.Str(missing=None),
'data': fields.Dict(missing=None),
}, location="json")
def rowchanged(rowindex, oldvalue, newvalue, data):
print(f"rowchanged(): row {rowindex}, '{oldvalue}' -> '{newvalue}'")
result = {
'resultcode': 'OK'
}
return jsonify(result)
if __name__ == '__main__':
app.run()
|
py | 1a34d20d9f6ef9052c4432ddcb8478f7b9e1185c | from functools import partial
import trw
import torch.nn as nn
import torch
class Net(nn.Module):
def __init__(self):
super().__init__()
self.encoder_decoder = trw.layers.AutoencoderConvolutional(
2,
1,
[8, 16, 32],
[32, 16, 8, 1], # make sure we are cropping the decoded output by adding another layer
convolution_kernels=3,
squash_function=torch.sigmoid, # make sure the output is [0..1] domain
last_layer_is_output=True # do not apply the activation on the last layer
)
def forward(self, batch):
x = batch['images']
encoded_x, decoded_x = self.encoder_decoder.forward_with_intermediate(x)
return {
'regression': trw.train.OutputRegression(decoded_x, x),
}
def per_epoch_fn():
callbacks = [
trw.callbacks.CallbackEpochSummary(),
trw.callbacks.CallbackSkipEpoch(
nb_epochs=1,
callbacks=[trw.callbacks.CallbackReportingExportSamples(table_name='random_samples', max_samples=5, split_exclusions=['train'])]),
]
return callbacks
options = trw.train.Options(num_epochs=100)
trainer = trw.train.TrainerV2(
callbacks_per_epoch=per_epoch_fn(),
callbacks_post_training=[trw.callbacks.CallbackReportingExportSamples(max_samples=2000)],
)
results = trainer.fit(
options,
datasets=trw.datasets.create_mnist_dataset(normalize_0_1=True),
log_path='mnist_autoencoder',
model=Net(),
optimizers_fn=partial(trw.train.create_sgd_optimizers_fn, learning_rate=0.25))
|
py | 1a34d2d9f4a544b7e2a509070e5a8baee289773d | import time
import hashlib
import matplotlib.pyplot as plot
import bcrypt
import random
import argon2
# Random salt generation
def ransalt():
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
chars = []
for i in range(16):
chars.append(random.choice(ALPHABET))
return "".join(chars)
# Variables
string = input('Enter the benchmarking string: ').encode()  # hash functions below expect bytes
key = input('Please specify a key between 4 and 56 bytes: ').encode()
# Labels match the benchmark order below (Bcrypt is timed before Scrypt)
algo = ['MD5', 'SHA-1', 'SHA-224', 'SHA-256', 'SHA-384', 'SHA-512', 'Bcrypt', 'Scrypt', 'Argon2']
colors = ['b', 'c', 'y', 'm', 'r', 'k', 'g', 'darkorange', 'purple']
results = {}
# Getting iterations and step
iterations = int(input("Iterations: "))
while iterations < 1 or iterations > 1000000:
    iterations = int(input("Please enter a valid value for the number of iterations (1-1000000): "))
step = int(input("Step: "))
while step < 1 or step > 1000000:
    step = int(input("Please enter a valid value for the step (1-1000000): "))
print("\nbenchmarking...\n")
# MD5
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.md5(string)
results[0,(i+1)*step] = (time.time() - Start)
print("\nMD5 benchmark done.\n")
# SHA-1
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha1(string)
results[1, (i+1)*step] = (time.time() - Start)
print("\nSHA-1 benchmark done.\n")
# SHA-224
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha224(string)
results[2, (i+1)*step] = (time.time() - Start)
print("\nSHA-224 benchmark done.\n")
# SHA-256
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha256(string)
results[3, (i+1)*step] = (time.time() - Start)
print("\nSHA-256 benchmark done.\n")
# SHA-384
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha384(string)
results[4, (i+1)*step] = (time.time() - Start)
print("\nSHA-384 benchmark done.\n")
# SHA-512
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha512(string)
results[5, (i+1)*step] = (time.time() - Start)
print("\nSHA-512 benchmark done.\n")
# Bcrypt
Start = time.time()
tString = string  # already bytes
for i in range (iterations):
for j in range ((i+1)*step):
bcrypt.hashpw(tString, bcrypt.gensalt()) #random salt
results[6, (i+1)*step] = (time.time() - Start)
print("\nBcrypt benchmark done.\n")
# Scrypt
Start = time.time()
tString = string  # redundant but for exhibition purposes
for i in range (iterations):
for j in range ((i+1)*step):
        hashlib.scrypt(key, salt=ransalt().encode(), n=2**14, r=8, p=1)
results[7, (i+1)*step] = (time.time() - Start)
print("\nScrypy benchmark done.\n")
# Argon2
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
argon2.argon2_hash(string, ransalt())
results[8, (i+1)*step] = (time.time() - Start)
print("\nArgon2 benchmark done.\n")
# Generate plot and print results
print("\n---------- Report ----------\n")
for i in range(9):
print(algo[i])
for j in range (iterations):
print((j+1)*step, 'iterations in', results[i,(j+1)*step]*pow(10,3), 'ms')
        plot.plot((j+1)*step, results[i,(j+1)*step]*pow(10,3), marker='o', linestyle='None', color=colors[i], label=str(algo[i]) if j == 0 else "")
print('\n')
plot.xlabel('Iterations')
plot.ylabel('Execution time in milliseconds')
plot.title('PyBenchHash', fontsize=40, color='white')
plot.legend(loc=2)
plot.grid(True)
plot.show() |
py | 1a34d31311ead09e80be8546eae7609c64400686 | #!/usr/bin/env python3
import os
import math
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
SOFT_DISABLE_TIME = 3 # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad",
"statsd", "shutdownd"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel
IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
CSID_MAP = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError, "2": EventName.driverCameraError}
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode")
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
self.read_only = not car_recognized or not controller_available or self.CP.dashcamOnly
if self.read_only:
safety_config = car.CarParams.SafetyConfig.new_message()
safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
self.CP.safetyConfigs = [safety_config]
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP, self.CI)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.cruise_mismatch_counter = 0
self.can_rcv_error_counter = 0
self.last_blinker_frame = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
self.last_actuators = car.CarControl.Actuators.new_message()
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
if len(self.CP.carFw) > 0:
set_offroad_alert("Offroad_CarUnrecognized", True)
else:
set_offroad_alert("Offroad_NoFirmware", True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
# Add startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Create events for battery, temperature, disk space, and memory
if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
      # at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
self.events.add(EventName.lowMemory)
# TODO: enable this once loggerd CPU usage is more reasonable
#cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
#if max(cpus, default=0) > 95 and not SIMULATION:
# self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['peripheralState'].pandaType in (PandaType.uno, PandaType.dos):
if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in (LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing):
self.events.add(EventName.laneChange)
if not CS.canValid:
self.events.add(EventName.canError)
for i, pandaState in enumerate(self.sm['pandaStates']):
# All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
if i < len(self.CP.safetyConfigs):
safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or \
pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam or \
pandaState.unsafeMode != self.CP.unsafeMode
else:
safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
# Check for HW or system issues
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaStates"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid() or self.can_rcv_error:
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
invalid = [s for s, valid in self.sm.valid.items() if not valid]
not_alive = [s for s, alive in self.sm.alive.items() if not alive]
cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive, can_error=self.can_rcv_error, error=True)
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(3. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)
# Check for FCW
stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
if planner_fcw or model_fcw:
self.events.add(EventName.fcw)
if TICI:
for m in messaging.drain_sock(self.log_sock, wait_for_one=False):
try:
msg = m.androidLog.message
if any(err in msg for err in ("ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED")):
csid = msg.split("CSID:")[-1].split(" ")[0]
evt = CSID_MAP.get(csid, None)
if evt is not None:
self.events.add(evt)
except UnicodeDecodeError:
pass
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
          # Not shown in the first 1 km to allow for driving out of the garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Check if all manager processes are running
not_running = {p.name for p in self.sm['managerState'].processes if not p.running}
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
if CS.brakePressed and v_future >= self.CP.vEgoStarting \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
if not self.initialized:
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION:
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
if REPLAY and self.sm['pandaStates'][0].controlsAllowed:
self.state = State.enabled
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_rcv_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if self.enabled and any(not ps.controlsAllowed for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.pcmCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
elif CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrement the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
sr = max(params.steerRatio, 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
actuators.longControlState = self.LoC.long_control_state
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
# accel PID loop
pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
t_since_plan = (self.sm.frame - self.sm.rcv_frame['longitudinalPlan']) * DT_CTRL
actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits, t_since_plan)
# Steering PID loop and lateral MPC
lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params, self.last_actuators,
desired_curvature, desired_curvature_rate)
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = True
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = steer
lac_log.saturated = abs(steer) >= 0.9
# Send a "steering required alert" if saturation count has reached the limit
if lac_log.active and lac_log.saturated and not CS.steeringPressed:
dpath_points = lat_plan.dPathPoints
if len(dpath_points):
# Check if we deviated from the path
# TODO use desired vs actual curvature
left_deviation = actuators.steer > 0 and dpath_points[0] < -0.20
right_deviation = actuators.steer < 0 and dpath_points[0] > 0.20
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)
if not isinstance(attr, Number):
continue
if not math.isfinite(attr):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return actuators, lac_log
def update_button_timers(self, buttonEvents):
# increment timer for buttons still pressed
for k in self.button_timers:
if self.button_timers[k] > 0:
self.button_timers[k] += 1
for b in buttonEvents:
if b.type.raw in self.button_timers:
self.button_timers[b.type.raw] = 1 if b.pressed else 0
def publish_logs(self, CS, start_time, actuators, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.active = self.active
CC.actuators = actuators
orientation_value = self.sm['liveLocationKalman'].orientationNED.value
if len(orientation_value) > 2:
CC.roll = orientation_value[0]
CC.pitch = orientation_value[1]
CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
hudControl = CC.hudControl
hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
hudControl.rightLaneVisible = True
hudControl.leftLaneVisible = True
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
model_v2 = self.sm['modelV2']
desire_prediction = model_v2.meta.desirePrediction
if len(desire_prediction) and ldw_allowed:
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]
lane_lines = model_v2.laneLines
l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))
hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event_types = set()
if ET.WARNING not in self.current_alert_types:
clear_event_types.add(ET.WARNING)
if self.enabled:
clear_event_types.add(ET.NO_ENTRY)
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
self.AM.add_many(self.sm.frame, alerts)
current_alert = self.AM.process_alerts(self.sm.frame, clear_event_types)
if current_alert:
hudControl.visualAlert = current_alert.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
self.last_actuators, can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
CC.actuatorsOutput = self.last_actuators
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
if current_alert:
controlsState.alertText1 = current_alert.alert_text_1
controlsState.alertText2 = current_alert.alert_text_2
controlsState.alertSize = current_alert.alert_size
controlsState.alertStatus = current_alert.alert_status
controlsState.alertBlinkingRate = current_alert.alert_rate
controlsState.alertType = current_alert.alert_type
controlsState.alertSound = current_alert.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_rcv_error_counter
lat_tuning = self.CP.lateralTuning.which()
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif lat_tuning == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif lat_tuning == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif lat_tuning == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, lac_log)
self.prof.checkpoint("Sent")
self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
|
py | 1a34d38a696231d844d4d60dc559ff3df2414219 | from dataclasses import dataclass
@dataclass
class ParmModel:
UserID: str
Version: str
Env: str
Source: str
Session: int
RC: int
ResultMsg: str
UserType: str
Email: str
Profile: str
UserName: str
@dataclass
class UserInfoModel:
Parms: ParmModel
|
py | 1a34d3c42e58f97c3d62a344b04640b18097f2ff | """
Methods for computing confidence intervals.
"""
import scipy.special as special
import numpy as np
import pandas as pd
import scipy.stats as stats
def z_effect(ci_low, ci_high):
"""
Compute an effect score for a z-score.
Parameters
----------
ci_low :
Lower bound of the confidence interval
ci_high :
Upper bound of the confidence interval
Returns
-------
score :
An effect score for a Z-score
Notes
----
    This is the smaller absolute value of the two confidence interval bounds,
    or zero if the interval contains zero.
"""
if np.isnan(ci_low) or np.isnan(ci_high):
return 0
return 0 if (ci_low * ci_high < 0) else min(abs(ci_low), abs(ci_high))
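# Illustrative calls (values are hypothetical, not from the original module):
# an interval that excludes zero yields the bound closer to zero, while one
# that straddles zero, or has a NaN bound, yields no effect.
#   z_effect(0.2, 0.5)     # -> 0.2
#   z_effect(-0.1, 0.3)    # -> 0
#   z_effect(np.nan, 0.3)  # -> 0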
def ci_mi(g, dof, n, conf):
"""
Compute confidence interval for mutual information from the chi-squared
distribution.
Parameters
----------
g :
the G-test score
dof :
the number of degrees of freedom
n :
the size of the data sample
conf :
the confidence level
Returns
-------
ci_low :
The lower level of the confidence interval for MI
ci_high :
The upper level of the confidence interval for MI
References
----------
Smithson, M. (Ed.). (2003). Confidence Intervals. (07/140). Thousand Oaks,
CA: SAGE Publications, Inc. doi: http://dx.doi.org/10.4135/9781412983761
https://en.wikipedia.org/wiki/G-test
"""
p_low = 1-(1-conf)/2
p_high = (1-conf)/2
g_low = special.chndtrinc(g, dof, p_low)
g_high = special.chndtrinc(g, dof, p_high)
ci_low, ci_high = ((g_low+dof)/(2.0*n), (g_high+dof)/(2.0*n))
return ci_low, ci_high
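# Usage sketch (hypothetical G-test output): a 95% interval for the mutual
# information of a table with G = 12.3, 4 degrees of freedom and n = 1000
# observations would be obtained as
#   lo, hi = ci_mi(12.3, 4, 1000, 0.95)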
def ci_norm(conf, stat, sigma):
"""
Confidence interval for a normal approximation.
Parameters
----------
conf :
the confidence level
stat :
the asymptotically normal statistic
sigma :
the standard deviation of the statistic
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
ci_low, ci_high = stats.norm.interval(conf, loc=stat, scale=sigma)
return ci_low, ci_high
def bootstrap_ci_ct(data, stat, num_samples=10000, conf=0.95):
"""
Bootstrap confidence interval computation on a contingency table
Parameters
----------
data :
Contingency table collected from independent samples
stat :
Statistic to bootstrap. Takes a contingency table as argument
num_samples :
Number of bootstrap samples to generate
conf :
Confidence level for the interval
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
if isinstance(data, pd.DataFrame):
data = data.values
dim = data.shape
data = data.flatten()
data += 1
n = data.sum()
# print 'Bootstrap on data of size {}'.format(n)
probas = (1.0*data)/n
# Obtain `num_samples' random samples of `n' multinomial values, sampled
# with replacement from {0, 1, ..., n-1}. For each sample, rebuild a
# contingency table and compute the stat.
temp = np.random.multinomial(n, probas, size=num_samples)
bs_stats = [row.reshape(dim) for row in temp]
bs_stats = [stat(ct) for ct in bs_stats]
alpha = 1-conf
ci_low = np.percentile(bs_stats, 100*alpha/2)
ci_high = np.percentile(bs_stats, 100*(1-alpha/2))
return ci_low, ci_high
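# Usage sketch (hypothetical table and statistic): bootstrap a 95% interval for
# a statistic over a 2x2 contingency table, e.g. the difference in row proportions.
#   def diff_props(ct):
#       return ct[0][0] / ct[0].sum() - ct[1][0] / ct[1].sum()
#   lo, hi = bootstrap_ci_ct(np.array([[30, 70], [45, 55]]), diff_props)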
def bootstrap_ci_corr(x, y, stat, num_samples=10000, conf=0.95):
"""
Bootstrap confidence interval computation for correlation
Parameters
----------
x :
First dimension of the data
y :
Second dimension of the data
stat :
Statistic to bootstrap. Takes a two-dimensional array as input
num_samples :
Number of bootstrap samples to generate
conf :
Confidence level for the interval
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
    data = np.array(list(zip(x, y)))  # list() so this also works on Python 3
n = len(data)
idxs = np.random.randint(0, n, (num_samples, n))
samples = [data[idx] for idx in idxs]
bs_stats = [stat(sample[:, 0], sample[:, 1]) for sample in samples]
alpha = 1-conf
ci_low = np.percentile(bs_stats, 100*alpha/2)
ci_high = np.percentile(bs_stats, 100*(1-alpha/2))
return ci_low, ci_high
def bootstrap_ci_ct_cond(data, stat, num_samples=10000, conf=0.95):
"""
Bootstrap confidence interval computation on a 3-way contingency table
Parameters
----------
data :
Contingency table collected from independent samples
stat :
Statistic to bootstrap. Takes a 3-way contingency table as argument
num_samples :
Number of bootstrap samples to generate
conf :
Confidence level for the interval
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
data = np.array([ct.values if isinstance(ct, pd.DataFrame)
else ct for ct in data])
dim = data.shape
data = [ct.flatten()+1 for ct in data]
probas = [(1.0*ct)/ct.sum() for ct in data]
# Resample for each explanatory group
temp = np.dstack([np.random.multinomial(data[i].sum(),
probas[i],
size=num_samples)
for i in range(dim[0])])
bs_stats = [row.T.reshape(dim) for row in temp]
bs_stats = [stat(ct) for ct in bs_stats]
alpha = 1-conf
ci_low = np.percentile(bs_stats, 100*alpha/2)
ci_high = np.percentile(bs_stats, 100*(1-alpha/2))
return ci_low, ci_high
|
py | 1a34d41006b72b4863ec35fb56a819fd73f603df | from django.test import TestCase
from mock import patch, MagicMock
from model_mommy import mommy
from dbaas.tests.helpers import DatabaseHelper
from logical.models import Database
from notification.tasks import check_database_is_alive
@patch('logical.models.Database.update_status', new=MagicMock())
@patch('notification.tasks.get_worker_name', new=MagicMock())
@patch('notification.tasks.TaskHistory.register')
class DatabaseStatusTestCase(TestCase):
def setUp(self):
self.task_history = mommy.make(
'TaskHistory',
task_name='notification.tasks.database_status',
)
def test_database_alive(self, task_register_mock):
database = DatabaseHelper.create(name='test', status=Database.ALIVE)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'SUCCESS')
self.assertIn('Database test is Alive', self.task_history.details)
def test_database_initializing(self, task_register_mock):
database = DatabaseHelper.create(
name='test', status=Database.INITIALIZING
)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'SUCCESS')
self.assertIn('Database test is Initializing',
self.task_history.details
)
def test_database_alert(self, task_register_mock):
database = DatabaseHelper.create(name='test', status=Database.ALERT)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'ERROR')
self.assertIn('Database test is Alert', self.task_history.details)
def test_database_dead(self, task_register_mock):
database = DatabaseHelper.create(name='test', status=Database.DEAD)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'ERROR')
self.assertIn('Database test is Dead', self.task_history.details)
|
py | 1a34d6a715374168234560bec7aff4f2b6d427f6 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 12:48:54 2019
@author: James Kring
@email: [email protected]
"""
import sys
sys.path.insert(0, '/home/cth/cthgroup/Python/recon')
from recon_input import InputClass
import click
# =============================================================================
# Example Commandline Use: python recon_runner.py shot 'times'
# python recon_runner.py 14092626 '1.62 1.63 1.64'
#
# Input files will be saved in shot_number directory that is in the
# working_directory set below.
# =============================================================================
@click.command(context_settings=dict(ignore_unknown_options=True,
allow_extra_args=True,))
@click.pass_context
@click.argument('shotnumber')
@click.argument('times')
@click.option('--inputs', is_flag=True, help='Generates input files only')
def cmd_line(ctx, shotnumber, times, inputs):
d = dict()
for item in ctx.args:
d1 = [item.split('=')]
d.update(d1)
times = times.split()
n_times = []
for time in times:
n_times.append(float(time))
try:
with open('v3config.txt', 'r') as file:
lines = file.readlines()
marker = True
except:
marker = False
if marker:
for i, line in enumerate(lines):
if line.startswith('working_directory'):
working_directory1 = line.rstrip().split('=')[1]
elif line.startswith('vmec_template'):
vmec_template1 = line.rstrip().split('=')[1]
elif line.startswith('v3fit_template'):
v3fit_template1 = line.rstrip().split('=')[1]
elif line.startswith('v3fit_executable'):
v3fit_executable1 = line.rstrip().split('=')[1]
if 'v3fit_executable' not in d:
d['v3fit_executable']=v3fit_executable1
if 'directory' not in d:
d['directory']=working_directory1
if 'vmec_template' not in d:
d['vmec_template']=vmec_template1
if 'v3fit_template' not in d:
d['v3fit_template']=v3fit_template1
shot = InputClass(int(shotnumber), n_times,
**d)
else:
shot = InputClass(int(shotnumber), n_times, **d)
if inputs:
shot.generate_input_files()
else:
shot.generate_and_run()
if __name__ == '__main__':
cmd_line() |
py | 1a34d6e8b207c1cbd59ff80126bebdf8935da978 | from code_pipeline.tests_generation import RoadTestFactory
from time import sleep
from swat_gen.road_gen import RoadGen
import logging as log
from code_pipeline.validation import TestValidator
from code_pipeline.tests_generation import RoadTestFactory
from scipy.interpolate import splprep, splev, interp1d, splrep
from shapely.geometry import LineString, Point, GeometryCollection
from numpy.ma import arange
class SwatTestGenerator:
"""
    This simple test generator creates roads using affine transformations of vectors.
    To generate the sequences of actions, e.g. "go straight", "turn right", "turn left",
    a Markov chain is used.
    This generator can quickly create a number of tests, however their fault-revealing power
    isn't optimized and the roads can intersect.
"""
def __init__(self, time_budget=None, executor=None, map_size=None):
self.map_size = map_size
self.time_budget = time_budget
self.executor = executor
def start(self):
road = RoadGen(self.map_size, 5, 30, 10, 80)
while self.executor.get_remaining_time() > 0:
# Some debugging
log.info(
"Starting test generation. Remaining time %s",
self.executor.get_remaining_time(),
)
# generate the road points.
            # class input values correspond to maximum distance to go straight and rotation angle
road.test_case_generate()
points = interpolate_road(road.road_points)
points = remove_invalid_cases(points, self.map_size)
the_test = RoadTestFactory.create_road_test(points)
# Some more debugging
log.info("Generated test using: %s", road.road_points)
#the_test = RoadTestFactory.create_road_test(road.road_points)
# Try to execute the test
test_outcome, description, execution_data = self.executor.execute_test(
the_test
)
# Print the result from the test and continue
log.info("test_outcome %s", test_outcome)
log.info("description %s", description)
if self.executor.road_visualizer:
sleep(5)
def interpolate_road(road):
#road.sort()
#print(road)
test_road = LineString([(t[0], t[1]) for t in road])
length = test_road.length
#print("Length", length)
old_x_vals = [t[0] for t in road]
old_y_vals = [t[1] for t in road]
if len(old_x_vals) == 2:
# With two points the only option is a straight segment
k = 1
elif len(old_x_vals) == 3:
# With three points we use an arc, using linear interpolation will result in invalid road tests
k = 2
else:
        # Otherwise, use cubic splines
k = 3
f2, u = splprep([old_x_vals, old_y_vals], s=0, k=k)
step_size = 1 / (length) * 10
xnew = arange(0, 1 + step_size, step_size)
x2, y2 = splev(xnew, f2)
nodes = list(zip(x2,y2))
return nodes
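# Usage sketch (hypothetical control points): three points are fitted with a
# quadratic spline (k=2 above) and densified to roughly one node every ~10 map
# units of road length before filtering and test creation.
#   nodes = interpolate_road([(10, 10), (30, 40), (60, 50)])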
def remove_invalid_cases(points, map_size):
new_list = []
i = 0
while i < len(points):
if point_in_range_2(points[i], map_size) == 1:
new_list.append(points[i])
else:
return new_list
i+=1
return new_list
def point_in_range_2(a, map_size):
"""check if point is in the acceptable range"""
    if (0 + 4) < a[0] < (map_size - 4) and (0 + 4) < a[1] < (map_size - 4):
return 1
else:
return 0
if __name__ == "__main__":
tests = SwatTestGenerator(time_budget=250000, executor="mock", map_size=200)
tests.start()
|
py | 1a34d77edd9e045cc26e7070a2e937b05a7350a2 | import regtools
import pandas as pd
from pandas import Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from numpy import nan, inf
class DataFrameTest:
df = pd.DataFrame(
data=[
(1, 2, 3),
(4, 5, nan),
(10, 11, 100)
],
columns=['y', 'x1', 'x2']
)
df_groups = pd.DataFrame(
data=[
(1, 2, 3, 'a'),
(4, 5, nan, 'a'),
(10, 11, 100, 'a'),
(2, 4, 6, 'b'),
(5, 10, nan, 'b'),
(11, 15, 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
df_groups_no_nan = pd.DataFrame(
data=[
(1, 2, 3, 'a'),
(4, 5, 8, 'a'),
(10, 11, 100, 'a'),
(2, 4, 6, 'b'),
(5, 10, 20, 'b'),
(11, 15, 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
df_groups_lag_reg = pd.DataFrame(data=[
(Timestamp('2000-01-01 00:00:00'), 1, 2, 3, nan, nan, 'a', nan, nan, nan, nan),
(Timestamp('2000-01-02 00:00:00'), 4, 5, 8, 2.0, 3.0, 'a', 3.0, 5.0, nan, nan),
(Timestamp('2000-01-03 00:00:00'), 10, 11, 100, 5.0, 8.0, 'a', 6.0, 92.0, 3.0, 5.0),
(Timestamp('2000-01-04 00:00:00'), 15, 12, 40, 11.0, 100.0, 'a', 1.0, -60.0, 6.0, 92.0),
(Timestamp('2000-01-06 00:00:00'), 22, 18, 82, 12.0, 40.0, 'a', 6.0, 42.0, 1.0, -60.0),
(Timestamp('2000-01-07 00:00:00'), 46, 10, 61, 18.0, 82.0, 'a', -8.0, -21.0, 6.0, 42.0),
(Timestamp('2000-01-01 00:00:00'), 2, 4, 6, nan, nan, 'b', nan, nan, nan, nan),
(Timestamp('2000-01-02 00:00:00'), 5, 10, 20, 4.0, 6.0, 'b', 6.0, 14.0, nan, nan),
(Timestamp('2000-01-03 00:00:00'), 11, 15, 150, 10.0, 20.0, 'b', 5.0, 130.0, 6.0, 14.0),
(Timestamp('2000-01-04 00:00:00'), 13, 12, 156, 15.0, 150.0, 'b', -3.0, 6.0, 5.0, 130.0),
(Timestamp('2000-01-07 00:00:00'), 13, 12, 156, nan, nan, 'b', nan, nan, nan, nan),
], columns=['Date', 'y', 'x1', 'x2', 'x1_lag_pregen',
'x2_lag_pregen', 'group', 'x1_diff_pregen', 'x2_diff_pregen',
'x1_diff_lag_pregen', 'x2_diff_lag_pregen'])
yvar = 'y'
xvars = ['x1', 'x2']
all_xvars = ['const', 'x1', 'x2']
groupvar = 'group'
fe_xvars = all_xvars + ['b']
class RegTest(DataFrameTest):
def _reg_test(self, df, expect_params, expect_cov, **reg_kwargs):
reg_result = regtools.reg(df, self.yvar, self.xvars, **reg_kwargs)
assert_series_equal(expect_params, reg_result.params)
assert_frame_equal(expect_cov, reg_result.cov_params())
def compare_params_and_pvalues(result1, result2):
assert (result1.params.values == result2.params.values).all()
assert (result1.pvalues.values == result2.pvalues.values).all()
#### Actual test cases below ####
class TestReg(RegTest):
def test_reg_simple(self):
expect_params = pd.Series(data = [
0.17948580753899121,
0.31490944113004432,
0.063565103400305134,
], index=self.all_xvars)
expect_cov = pd.DataFrame(data = [
(inf, inf, -inf),
(inf, inf, -inf),
(-inf, -inf, inf),
], columns=self.all_xvars, index=self.all_xvars)
self._reg_test(self.df, expect_params, expect_cov, robust=False, cluster=False)
def test_reg_nocons(self):
expect_params = pd.Series(data = [
0.41916167664670689,
0.053892215568862201,
], index=self.xvars)
expect_cov = pd.DataFrame(data = [
(inf, -inf),
(-inf, inf),
], columns =self.xvars, index=self.xvars)
self._reg_test(self.df, expect_params, expect_cov, robust=False, cluster=False, cons=False)
def test_reg_cluster(self):
expect_params = pd.Series(data=[
-0.057782704189492023,
0.56915450458771888,
0.023236241968922107,
], index=self.all_xvars)
expect_cov = pd.DataFrame(data=[
(1.3825827000648401, -0.64860156573107908, 0.037973583240341238),
(-0.64860156573067984, 0.30427401633831397, -0.017814287380373151),
(0.03797358324036447, -0.017814287380394575, 0.0010429705391543902),
], columns=self.all_xvars, index=self.all_xvars)
self._reg_test(self.df_groups, expect_params, expect_cov, robust=False, cluster=[self.groupvar])
def test_reg_fe(self):
expect_params = pd.Series(data = [
0.5140934,
0.60590361,
0.02298608,
-1.71967829,
], index=self.fe_xvars)
expect_cov = pd.DataFrame(data = [
(0.86642167435809347, -0.15421903256428801, 0.0088046661162774695, 0.10782189494174726),
(-0.15421903256428801, 0.043999645145278127, -0.0029669956299292195, -0.097047126884220028),
(0.0088046661162774695, -0.0029669956299292195, 0.00024317047738642904, 0.0056102902997011801),
(0.10782189494174726, -0.097047126884220028, 0.0056102902997011801, 0.76804342596454134),
], columns=self.fe_xvars, index=self.fe_xvars)
self._reg_test(self.df_groups_no_nan, expect_params, expect_cov, robust=False, fe=self.groupvar)
class TestLagReg(DataFrameTest):
def test_lag_reg(self):
expected_result = regtools.reg(
self.df_groups_lag_reg,
'y',
['x1_lag_pregen', 'x2_lag_pregen']
)
lag_result = regtools.reg(
self.df_groups_lag_reg,
'y',
['x1', 'x2'],
lag_variables=['x1', 'x2'],
num_lags=1,
lag_period_var='Date',
lag_id_var='group',
lag_fill_method=None
)
compare_params_and_pvalues(expected_result, lag_result)
class TestDiffReg(DataFrameTest):
def test_diff_reg(self):
expected_result = regtools.reg(
self.df_groups_lag_reg,
'y',
['x1_diff_pregen', 'x2_diff_pregen']
)
diff_result = regtools.chooser.any_reg(
'diff',
self.df_groups_lag_reg,
'y',
['x1', 'x2'],
diff_cols=['x1', 'x2'],
difference_lag=1,
date_col='Date',
id_col='group',
diff_fill_method=None
)
compare_params_and_pvalues(expected_result, diff_result)
def test_diff_reg_lag(self):
expected_result = regtools.reg(
self.df_groups_lag_reg,
'y',
['x1_diff_lag_pregen', 'x2_diff_lag_pregen']
)
diff_result = regtools.chooser.any_reg(
'diff',
self.df_groups_lag_reg,
'y',
['x1', 'x2'],
diff_cols=['x1', 'x2'],
difference_lag=1,
date_col='Date',
id_col='group',
lag_variables=['x1', 'x2'],
num_lags=1,
lag_period_var='Date',
lag_id_var='group',
diff_fill_method=None,
lag_fill_method=None,
)
compare_params_and_pvalues(expected_result, diff_result)
class TestQuantReg(DataFrameTest):
def test_quant_reg(self):
result = regtools.chooser.any_reg(
'quantile',
self.df_groups_lag_reg,
self.yvar,
self.xvars,
q=0.9
)
# TODO [#7]: check accuracy of quant reg result in test
def test_quant_reg_iter(self):
result = regtools.reg_for_each_xvar_set_and_produce_summary(
self.df_groups_lag_reg,
self.yvar,
[self.xvars, self.xvars],
reg_type='quantile',
q=0.9
) |
py | 1a34d792cb35652f79c09d60cee732d2d7d64777 | from botocore.exceptions import ClientError
# Stores found values to minimize AWS calls
PARAM_CACHE = {}
current_region = None
def get_special_param(client, func, param):
print('Getting info for func: {}, param: {}'.format(func, param))
if param in PARAM_CACHE:
return PARAM_CACHE[param]
if param == 'Bucket':
PARAM_CACHE[param] = get_bucket(client)
elif param == 'Attribute':
# Return 'Attribute directly because it doesn't need to reach out to AWS
return get_attribute(func)
elif param == 'Key':
PARAM_CACHE[param] = get_key(client)
return PARAM_CACHE[param]
def get_key(client, i=0):
try:
bucket = client.list_buckets()['Buckets'][i]['Name']
try:
key = client.list_objects_v2(
Bucket=bucket,
MaxKeys=1
).get('Contents', [{}])[0].get('Key')
return key
except KeyError:
            return get_key(client, i+1)  # If this bucket is empty, try the next one
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDeniedException':
return None
return None
def get_bucket(client):
try:
return client.list_buckets()['Buckets'][0]['Name']
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDeniedException':
return None
return None
def get_attribute(func):
FUNC_ATTRIBUTES = {
'reset_image_attribute': 'launchPermission',
'reset_instance_attribute': 'kernel',
'reset_snapshot_attribute': 'createVolumePermission',
'describe_instance_attribute': 'instanceType',
'describe_image_attribute': 'description',
'describe_snapshot_attribute': 'productCodes',
'describe_vpc_attribute': 'enableDnsSupport',
}
return FUNC_ATTRIBUTES.get(func, None)
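# Usage sketch (assumes boto3 credentials are configured; names are illustrative):
#   import boto3
#   s3 = boto3.client('s3')
#   get_special_param(s3, 'list_objects_v2', 'Bucket')  # looked up once, then cached
#   get_special_param(None, 'describe_image_attribute', 'Attribute')  # -> 'description'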
|
py | 1a34d81bc582a92598a7f432c3d1bd2926adf40b | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Quantization define"""
import mindspore as ms
import mindspore.nn as nn
from mindspore import Parameter, Tensor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.common.initializer import initializer
#------weight symmetric, activation asymmetric------#
class QuanConv(nn.Conv2d):
r"""Conv for quantization"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_mode='same',
padding=0, dilation=1, group=1, has_bias=True):
super(QuanConv, self).__init__(in_channels, out_channels,
kernel_size, stride, pad_mode, padding, dilation, group, has_bias)
self.floor = P.Floor()
self.expand_dims = P.ExpandDims()
self.x_lower_bound = Tensor(0, ms.float32)
self.x_upper_bound = Tensor(2 ** 8 - 1, ms.float32)
self.w_lower_bound = Tensor(-2 ** 7 - 1, ms.float32)
self.w_upper_bound = Tensor(2 ** 7, ms.float32)
self.scale_a = Parameter(initializer('ones', [1]), name='scale_a')
self.scale_w = Parameter(initializer(
'ones', [out_channels]), name='scale_w')
self.zp_a = Parameter(initializer('ones', [1]), name='zp_a')
def construct(self, in_data):
r"""construct of QuantConv"""
x = self.floor(in_data / self.scale_a - self.zp_a + 0.5)
x = C.clip_by_value(x, self.x_lower_bound, self.x_upper_bound)
x = (x + self.zp_a) * self.scale_a
exp_dim_scale_w = self.scale_w
exp_dim_scale_w = self.expand_dims(exp_dim_scale_w, 1)
exp_dim_scale_w = self.expand_dims(exp_dim_scale_w, 2)
exp_dim_scale_w = self.expand_dims(exp_dim_scale_w, 3)
w = self.floor(self.weight / exp_dim_scale_w + 0.5)
w = C.clip_by_value(w, self.w_lower_bound, self.w_upper_bound)
w = w * exp_dim_scale_w
# forward
output = self.conv2d(x, w)
if self.has_bias:
output = self.bias_add(output, self.bias)
return output
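# Usage sketch (illustrative, not part of the original file): the layer
# fake-quantizes activations to an unsigned 8-bit range and weights to a signed
# 8-bit range per output channel, then runs the ordinary convolution on the
# de-quantized values, e.g.
#   net = QuanConv(in_channels=3, out_channels=16, kernel_size=3)
#   out = net(input_tensor)  # input_tensor: float32 Tensor of shape (N, 3, H, W)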
|
py | 1a34d978360cf016ede2ed410fe2e7a2f0c691bd | from django.contrib.auth.models import User
from image_loader.image.models import MainImage, Image
from image_loader.plan.models import UserPlan, Plan
from rest_framework.test import APITestCase
from django.urls import reverse
from django.core.files import File
class TestAPI(APITestCase):
@classmethod
def setUpTestData(cls):
"""
Mock some objects
"""
enterprise_user = User.objects.create_user(
username="enterprise",
password="enterprise",
)
enterprise_plan = Plan.objects.create(
plan_name="Enterprise",
allowed_sizes="200 400",
acces_to_the_og=True,
ability_to_generate_expiring_links=True,
)
UserPlan.objects.create(
user=enterprise_user,
plan=enterprise_plan
)
basic_user = User.objects.create_user(
username="basic",
password="basic",
)
basic_plan = Plan.objects.create(
plan_name="Basic",
allowed_sizes="200",
acces_to_the_og=False,
ability_to_generate_expiring_links=False,
)
UserPlan.objects.create(
user=basic_user,
plan=basic_plan
)
def test_get_allowed_sizes(self):
"""
test if obj method returns correct data
"""
plan = Plan.objects.get(plan_name="Enterprise")
self.assertEqual(plan.get_allowed_sizes(), ["200", "400"])
plan = Plan.objects.get(plan_name="Basic")
self.assertEqual(plan.get_allowed_sizes(), ["200"])
def test_image_main_view_set_basic(self):
"""
image uploader, Basic Plan
"""
url = reverse("image:mainimage-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 403) ## because of unauth
user = User.objects.get(username="basic")
self.client.force_authenticate(user)
response = self.client.get(url)
self.assertEqual(response.status_code, 200) ## auth, OK
data = {}
response = self.client.post(url, data)
        self.assertEqual(response.status_code, 400) ## because of empty image
data["image"] = File(open("media/test/test.jpg", "rb"))
response = self.client.post(url, data)
self.assertEqual(response.data["image_name"], "test")
images = response.data["images"]
self.assertEqual(len(images), 1) ## just image of size 200
data = {}
data["image"] = File(open("media/test/test.bmp", "rb"))
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400) ## because of the incorrect extension
self.assertEqual(str(response.data["image"][0]), "Incorrect file!")
data["image"] = File(open("media/test/test.jpg", "rb"))
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400) ## same file already exists
def test_image_main_views_set_and_detail_enterprise(self):
"""
image uploader, Enerprise Plan
"""
user = User.objects.get(username="enterprise")
self.client.force_authenticate(user)
url = reverse("image:mainimage-list")
data = {}
data["image"] = File(open("media/test/test.jpg", "rb"))
response = self.client.post(url, data)
self.assertEqual(len(response.data["images"]), 3) ## 200, 400 and original photo
url = reverse("image:mainimage-detail", kwargs={"image_name": "test"})
response = self.client.get(url)
self.assertEqual(response.data["image_name"], "test")
self.assertEqual(len(response.data["images"]), 3) ## 200, 400 and original photo
def test_generate_link_api_view(self):
"""
generating temporary links to images
"""
url = reverse("image:mainimage-list")
data = {}
data["image"] = File(open("media/test/test.jpg", "rb"))
user = User.objects.get(username="enterprise")
self.client.force_authenticate(user)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 201)
url = reverse("image:generate-link")
data = {
"expires_after": 1000,
"size": 200,
"image_name": "test"
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
link = response.data["link"]
response = self.client.get(link)
self.assertEqual(response.status_code, 200) |
py | 1a34da329d4effb47325d1ac6fff12d6b1e0bd54 | from django.test import TestCase
from django.test.client import Client
from wagtail.wagtailredirects import models
def get_default_site():
from wagtail.wagtailcore.models import Site
return Site.objects.filter(is_default_site=True).first()
def get_default_host():
return get_default_site().root_url.split('://')[1]
class TestRedirects(TestCase):
def test_path_normalisation(self):
# Shortcut to normalise function (to keep things tidy)
normalise_path = models.Redirect.normalise_path
# Create a path
path = normalise_path('/Hello/world.html?foo=Bar&Baz=quux2')
        # Test against equivalent paths
self.assertEqual(path, normalise_path('/Hello/world.html?foo=Bar&Baz=quux2')) # The exact same URL
self.assertEqual(path, normalise_path('Hello/world.html?foo=Bar&Baz=quux2')) # Leading slash can be omitted
self.assertEqual(path, normalise_path('Hello/world.html/?foo=Bar&Baz=quux2')) # Trailing slashes are ignored
self.assertEqual(path, normalise_path('/Hello/world.html?foo=Bar&Baz=quux2#cool')) # Fragments are ignored
        self.assertEqual(path, normalise_path('/Hello/world.html?Baz=quux2&foo=Bar')) # Order of query string parameters is ignored
# Test against different paths
self.assertNotEqual(path, normalise_path('/hello/world.html?foo=Bar&Baz=quux2')) # 'hello' is lowercase
self.assertNotEqual(path, normalise_path('/Hello/world?foo=Bar&Baz=quux2')) # No '.html'
self.assertNotEqual(path, normalise_path('/Hello/world.html?foo=bar&Baz=Quux2')) # Query string parameters have wrong case
self.assertNotEqual(path, normalise_path('/Hello/world.html?foo=Bar&baz=quux2')) # ditto
self.assertNotEqual(path, normalise_path('/Hello/WORLD.html?foo=Bar&Baz=quux2')) # 'WORLD' is uppercase
self.assertNotEqual(path, normalise_path('/Hello/world.htm?foo=Bar&Baz=quux2')) # '.htm' is not the same as '.html'
# Normalise some rubbish to make sure it doesn't crash
normalise_path('This is not a URL')
normalise_path('//////hello/world')
normalise_path('!#@%$*')
normalise_path('C:\\Program Files (x86)\\Some random program\\file.txt')
def test_basic_redirect(self):
# Get a client
c = Client()
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', site=get_default_site())
redirect.save()
# Navigate to it
r = c.get('/redirectme/', HTTP_HOST=get_default_host())
# Check that we were redirected
self.assertEqual(r.status_code, 301)
self.assertTrue(r.has_header('Location'))
def test_temporary_redirect(self):
# Get a client
c = Client()
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', site=get_default_site(), is_permanent=False)
redirect.save()
# Navigate to it
r = c.get('/redirectme/', HTTP_HOST=get_default_host())
# Check that we were redirected temporarily
self.assertEqual(r.status_code, 302)
self.assertTrue(r.has_header('Location')) |
py | 1a34da3a8a9e50909bf22030418dafb0a9f879ef | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import getClassTag
class DefaultTable(object):
dependencies = []
def __init__(self, tag=None):
if tag is None:
tag = getClassTag(self.__class__)
self.tableTag = Tag(tag)
def decompile(self, data, ttFont):
self.data = data
def compile(self, ttFont):
return self.data
def toXML(self, writer, ttFont, progress=None):
if hasattr(self, "ERROR"):
writer.comment("An error occurred during the decompilation of this table")
writer.newline()
writer.comment(self.ERROR)
writer.newline()
writer.begintag("hexdata")
writer.newline()
writer.dumphex(self.compile(ttFont))
writer.endtag("hexdata")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
from fontTools.misc.textTools import readHex
from fontTools import ttLib
if name != "hexdata":
raise ttLib.TTLibError("can't handle '%s' element" % name)
self.decompile(readHex(content), ttFont)
def __repr__(self):
return "<'%s' table at %x>" % (self.tableTag, id(self))
def __eq__(self, other):
if type(self) != type(other):
return NotImplemented
return self.__dict__ == other.__dict__
def __ne__(self, other):
result = self.__eq__(other)
return result if result is NotImplemented else not result
|
py | 1a34da99d9ec0e7ac25c539f0413df2623a3ac12 | import logging
import pytest
log = logging.getLogger("dexbot")
log.setLevel(logging.DEBUG)
@pytest.fixture()
def worker(strategybase):
return strategybase
@pytest.mark.mandatory
def test_init(worker):
pass
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_get_operational_balance(asset, worker, monkeypatch):
share = 0.1
def get_share(*args):
return share
symbol = worker.market[asset]['symbol']
balance = worker.balance(symbol)
op_balance = worker.get_operational_balance()
assert op_balance[asset] == balance['amount']
monkeypatch.setattr(worker, 'get_worker_share_for_asset', get_share)
op_balance = worker.get_operational_balance()
assert op_balance[asset] == balance['amount'] * share
|
py | 1a34db309a1b2b19e2ee121493edbea51426ca27 | """
Please see
https://computationalmindset.com/en/neural-networks/ordinary-differential-equation-solvers.html#ode1
for details
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from torchdiffeq import odeint
ode_fn = lambda t, x: torch.sin(t) + 3. * torch.cos(2. * t) - x
an_sol = lambda t : (1./2.) * np.sin(t) - (1./2.) * np.cos(t) + \
(3./5.) * np.cos(2.*t) + (6./5.) * np.sin(2.*t) - \
(1./10.) * np.exp(-t)
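# The IVP solved here is x'(t) = sin(t) + 3*cos(2*t) - x(t) with x(0) = 0;
# an_sol above is its closed-form solution, used to check the numerical result.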
t_begin=0.
t_end=10.
t_nsamples=100
t_space = np.linspace(t_begin, t_end, t_nsamples)
x_init = torch.Tensor([0.])
x_an_sol = an_sol(t_space)
x_num_sol = odeint(ode_fn, x_init, torch.Tensor(t_space))
plt.figure()
plt.plot(t_space, x_an_sol, '--', linewidth=2, label='analytical')
plt.plot(t_space, x_num_sol, linewidth=1, label='numerical')
plt.title('ODE 1st order IVP solved by TorchDiffEq')
plt.xlabel('t')
plt.ylabel('x')
plt.legend()
plt.show()
|
py | 1a34db5a18f60ec588bb51778f3fac2040aef778 | #!/usr/bin/env python2
# coding: utf-8
import re
from collections import defaultdict
from pykit.dictutil import FixedKeysDict
from .block_id import BlockID
from .block_desc import BlockDesc
from .block_group_id import BlockGroupID
from .block_index import BlockIndex
from .replication_config import ReplicationConfig
class BlockGroupBaseError(Exception):
pass
class BlockNotFoundError(BlockGroupBaseError):
pass
class BlockExists(BlockGroupBaseError):
pass
class BlockTypeNotSupported(BlockGroupBaseError):
pass
class BlockTypeNotSupportReplica(BlockGroupBaseError):
pass
def _idcs(lst):
return list(lst)
def _blocks(blocks=None):
if blocks is None:
return {}
for idx, blk in blocks.items():
blocks[idx] = BlockDesc(blk)
return blocks
class BlockGroup(FixedKeysDict):
keys_default = dict(
block_group_id=BlockGroupID,
config=ReplicationConfig,
idcs=_idcs,
blocks=_blocks,
)
ident_keys = ('block_group_id',)
def __init__(self, *args, **kwargs):
super(BlockGroup, self).__init__(*args, **kwargs)
self.type_map = self.make_type_map()
def get_block_type(self, block_index):
mp = self.type_map
bi = BlockIndex(block_index)
try:
return mp[bi.i][bi.j]
except IndexError:
raise BlockTypeNotSupported('invalid index at {bi}'.format(bi=bi))
def make_type_map(self):
cnf = self['config']
nr_data, nr_parity = cnf['in_idc']
nr_in_idc, nr_xor_idc = cnf['cross_idc']
data_replica = cnf['data_replica']
rst = []
prefixes = ('d' * nr_in_idc
+ 'x' * nr_xor_idc)
for pref in prefixes:
o = [pref + '0'] * nr_data
o += [pref + 'p'] * nr_parity
for j in range(1, data_replica):
o += ['%s%d' % (pref, j)] * nr_data
rst.append(o)
return rst
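    # Illustration (hypothetical config, not from the original file): with
    # cross_idc=[1, 1], in_idc=[2, 1] and data_replica=2 the map built above is
    #   [['d0', 'd0', 'dp', 'd1', 'd1'],
    #    ['x0', 'x0', 'xp', 'x1', 'x1']]
    # i.e. one row per IDC holding data, parity and replica block types.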
def mark_delete_block(self, block_index):
block = self.get_block(block_index, raise_error=True)
block.rm_ref()
if block.can_del():
block.mark_del()
return block
return None
def mark_delete_block_byid(self, block_id):
block = self.get_block_byid(block_id, raise_error=True)
block.rm_ref()
if block.can_del():
block.mark_del()
return block
return None
def unlink_block(self, block_index):
block = self.get_block(block_index, raise_error=True)
if not block.is_mark_del():
block.rm_ref()
if block.can_del():
del self['blocks'][str(block_index)]
return block
return None
def unlink_block_byid(self, block_id):
block = self.get_block_byid(block_id, raise_error=True)
if not block.is_mark_del():
block.rm_ref()
if block.can_del():
del self['blocks'][block_id.block_index]
return block
return None
def delete_block(self, block_index):
return self.unlink_block(block_index)
def delete_block_byid(self, block_id):
return self.unlink_block_byid(block_id)
def has(self, block):
bid = block['block_id']
bidx = bid.block_index
existent = self['blocks'].get(bidx)
return existent == block
def link_block(self, block_index):
block = self.get_block(block_index, raise_error=True)
block.add_ref()
return block
def link_block_byid(self, block_id):
block = self.get_block_byid(block_id, raise_error=True)
block.add_ref()
return block
def add_block(self, new_block, replace=False, allow_exist=False):
if self.has(new_block) and allow_exist:
return new_block
bid = new_block['block_id']
bidx = bid.block_index
prev = self['blocks'].get(bidx)
if not replace and prev is not None:
raise BlockExists(
'there is already a block at {bid}'.format(bid=bid))
self['blocks'][bidx] = new_block
if prev is None:
return None
else:
return BlockDesc(prev)
def get_free_block_indexes(self, block_type=None, get_all=False):
free_block_index = defaultdict(list)
cnf = self['config']
n = sum(cnf['cross_idc'])
m = sum(cnf['in_idc'])
for i in range(n):
for j in range(m):
bi = BlockIndex(i, j)
typ = self.get_block_type(bi)
idc = self.get_block_idc(bi)
if get_all:
# set the key 'idc' with default if key not set
free_block_index[idc]
if block_type is not None and typ != block_type:
continue
if self.get_block(bi, raise_error=False) is None:
free_block_index[idc].append(str(bi))
return free_block_index
def get_block(self, block_index, raise_error=True):
bi = BlockIndex(block_index)
b = self['blocks'].get(str(bi))
if raise_error and b is None:
raise BlockNotFoundError(
'block_index:{bi}'
' not found in block_group:{block_group_id}'.format(bi=bi, **self))
return b
def get_block_idc(self, block_index):
bi = BlockIndex(block_index)
return self['idcs'][bi.i]
def get_primary_index(self, block_index):
nr_data, nr_parity = self['config']['in_idc']
bi = BlockIndex(block_index)
j = bi.j
if j >= nr_data:
j -= nr_parity
j %= nr_data
return BlockIndex(bi.i, j)
def get_replica_indexes(self, block_index, include_me=True):
nr_data, nr_parity = self['config']['in_idc']
data_replica = self['config']['data_replica']
bi = BlockIndex(block_index)
typ = self.get_block_type(bi)
if typ.endswith('p'):
raise BlockTypeNotSupportReplica(
'block type {typ}'
' does not support replica'.format(typ=typ))
pbi = self.get_primary_index(block_index)
rst = [str(pbi)]
for j in range(1, data_replica):
rbi = BlockIndex(pbi.i,
pbi.j + nr_parity + j * nr_data)
rst.append(str(rbi))
# if not include_me and str(block_index) in rst:
if not include_me:
rst.remove(str(block_index))
return rst
def classify_blocks(self, idc_index, only_primary=True):
nr_data, nr_parity = self['config']['in_idc']
ec = []
replica = []
mark_del = []
for i in range(0, nr_data):
bi = BlockIndex(idc_index, i)
blk = self.get_block(bi, raise_error=False)
if blk is None:
continue
if blk.is_mark_del():
mark_del.append(blk)
continue
replica_idxes = self.get_replica_indexes(bi, include_me=False)
rblks = self.indexes_to_blocks(replica_idxes)
if None in rblks:
ec.append(blk)
continue
replica.append(blk)
if only_primary:
continue
replica.extend(rblks)
return {'ec': ec, 'replica': replica, 'mark_del': mark_del}
def indexes_to_blocks(self, indexes):
blks = []
for idx in indexes:
bi = BlockIndex(idx)
blk = self.get_block(bi, raise_error=False)
blks.append(blk)
return blks
def get_parity_indexes(self, idc_index):
indexes = []
nr_data, nr_parity = self['config']['in_idc']
for i in range(nr_data, nr_data + nr_parity):
bi = BlockIndex(idc_index, i)
indexes.append(bi)
return indexes
def get_parities(self, idc_index):
idxes = self.get_parity_indexes(idc_index)
blks = self.indexes_to_blocks(idxes)
return [blk for blk in blks if blk is not None]
def is_ec_block(self, block_id):
block_id = BlockID(block_id)
blk = self.get_block(block_id.block_index, raise_error=False)
if blk is None or blk['block_id'] != block_id:
raise BlockNotFoundError(
'block_id:{bid}'
' not found in block_group:{block_group_id}'.format(bid=block_id, **self))
if block_id.type.endswith('p'):
blk = self.get_block(block_id.block_index, raise_error=True)
return True
r_indexes = self.get_replica_indexes(block_id.block_index)
r_blks = [self.get_block(x, raise_error=False) for x in r_indexes]
return None in r_blks
def get_blocks(self):
blks = []
for idx in sorted(self['blocks'].keys()):
blk = self['blocks'][idx]
blks.append(blk)
return blks
def get_ec_blocks(self, idc_idx):
nr_data, nr_parity = self['config']['in_idc']
blks = []
for i in range(0, nr_data + nr_parity):
blk = self.get_block(BlockIndex(idc_idx, i), raise_error=False)
if blk is None:
continue
if self.is_ec_block(blk['block_id']):
blks.append(blk)
return blks
def get_ec_broken_blocks(self, idc_idx, broken_bids):
broken_blks = []
for blk in self.get_ec_blocks(idc_idx):
if blk['block_id'] in broken_bids:
broken_blks.append(blk)
return broken_blks
def get_ec_block_ids(self, idc_idx):
bids = []
for blk in self.get_ec_blocks(idc_idx):
bids.append(blk['block_id'])
return bids
def get_replica_blocks(self, block_id, include_me=True, raise_error=True):
block_id = BlockID(block_id)
r_indexes = self.get_replica_indexes(block_id.block_index, True)
is_exist = False
blks = []
for idx in r_indexes:
blk = self.get_block(idx, raise_error=False)
if blk is None:
continue
if blk['block_id'] == block_id:
is_exist = True
if not include_me:
continue
blks.append(blk)
if not is_exist:
if raise_error:
raise BlockNotFoundError(self['block_group_id'], block_id)
else:
return None
return blks
def get_block_byid(self, block_id, raise_error=True):
block_id = BlockID(block_id)
blk = self.get_block(block_id.block_index, raise_error=False)
if blk is None or blk['block_id'] != block_id:
if raise_error:
raise BlockNotFoundError(self['block_group_id'], block_id)
else:
return None
return blk
def get_idc_blocks(self, idc_idx, is_del=None, types=None):
blks = []
for idx in sorted(self['blocks'].keys()):
blk = self['blocks'][idx]
idx = BlockIndex(idx)
typ = self.get_block_type(idx)
if types is not None and typ not in types:
continue
if idx.i != idc_idx:
continue
if is_del is not None and blk['is_del'] != is_del:
continue
blks.append(blk)
return blks
def get_idc_blocks_no_replica(self, idc_idx, is_del=None):
types = ['d0', 'dp', 'x0', 'xp']
return self.get_idc_blocks(idc_idx, is_del=is_del, types=types)
def get_d0_idcs(self):
cross_idc = self["config"]["cross_idc"]
return self["idcs"][:cross_idc[0]]
def get_dtype_by_idc(self, idc):
cfg = self["config"]
assert idc in self["idcs"]
assert sum(cfg["cross_idc"]) == len(self["idcs"])
d0_idcs = self["idcs"][:cfg["cross_idc"][0]]
if idc in d0_idcs:
return "d0"
else:
return "x0"
def get_idc_block_ids(self, idc_idx, is_del=None, types=None):
blks = self.get_idc_blocks(idc_idx, is_del=is_del, types=types)
return [BlockID(b['block_id']) for b in blks]
def get_idc_block_ids_no_replica(self, idc_idx, is_del=None):
types = ['d0', 'dp', 'x0', 'xp']
return self.get_idc_block_ids(idc_idx, is_del=is_del, types=types)
@classmethod
def is_data(cls, block_id):
return block_id.type in ('d0', 'x0')
@classmethod
def is_replica(cls, block_id):
return re.match(r'd[1-9]', block_id.type) is not None
@classmethod
def is_parity(cls, block_id):
return block_id.type in ('dp', 'xp')
|
bzl | 1a34dbaad4735386fc58a2677dea600572d9f80b | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Qt build rules."""
load(
"//:build_defs.bzl",
"cc_binary_mozc",
"cc_library_mozc",
"select_mozc",
)
load(
"//:config.bzl",
"MACOS_BUNDLE_ID_PREFIX",
"MACOS_MIN_OS_VER",
"QT_BIN_PATH",
)
load("@build_bazel_rules_apple//apple:macos.bzl", "macos_application")
def cc_qt_library_mozc(name, deps = [], **kwargs):
cc_library_mozc(
name = name,
deps = deps + select_mozc(
default = ["//third_party/qt:qt_native"],
oss_linux = ["@io_qt//:qt"],
oss_macos = ["@io_qt//:qt_mac"],
),
**kwargs
)
def cc_qt_binary_mozc(name, deps = [], **kwargs):
cc_binary_mozc(
name = name,
deps = deps + select_mozc(
default = ["//third_party/qt:qt_native"],
oss_linux = ["@io_qt//:qt"],
oss_macos = ["@io_qt//:qt_mac"],
),
**kwargs
)
def qt_moc_mozc(name, srcs, outs):
native.genrule(
name = name,
srcs = srcs,
outs = outs,
cmd = select_mozc(
default = "$(location //third_party/qt:moc) -p $$(dirname $<) -o $@ $(SRCS)",
oss = QT_BIN_PATH + "moc -p $$(dirname $<) -o $@ $(SRCS)",
),
tools = select_mozc(
default = ["//third_party/qt:moc"],
oss = [],
),
)
def qt_uic_mozc(name, srcs, outs):
native.genrule(
name = name,
srcs = srcs,
outs = outs,
cmd = select_mozc(
default = "$(location //third_party/qt:uic) -o $@ $(SRCS)",
oss = QT_BIN_PATH + "uic -o $@ $(SRCS)",
),
tools = select_mozc(
default = ["//third_party/qt:uic"],
oss = [],
),
)
def qt_rcc_mozc(name, qrc_name, qrc_file, srcs, outs):
native.genrule(
name = name,
srcs = [qrc_file] + srcs,
outs = outs,
cmd = select_mozc(
default = "$(location //third_party/qt:rcc) -o $@ -name " + qrc_name + " " + qrc_file,
oss = QT_BIN_PATH + "rcc -o $@ -name " + qrc_name + " $(location " + qrc_file + ")",
),
tools = select_mozc(
default = ["//third_party/qt:rcc"],
oss = [],
),
)
def macos_qt_application_mozc(name, bundle_name, deps):
macos_application(
name = name,
tags = ["manual"],
additional_contents = select_mozc(
default = {},
oss = {"@io_qt//:libqcocoa": "Resources"},
),
app_icons = ["//data/images/mac:product_icon.icns"],
bundle_id = MACOS_BUNDLE_ID_PREFIX + ".Tool." + bundle_name,
bundle_name = bundle_name,
infoplists = ["//gui:mozc_tool_info_plist"],
minimum_os_version = MACOS_MIN_OS_VER,
resources = [
"//data/images/mac:candidate_window_logo.tiff",
"//gui:qt_conf",
],
visibility = ["//:__subpackages__"],
deps = deps + select_mozc(
default = [],
oss = [
"@io_qt//:QtCore_mac",
"@io_qt//:QtGui_mac",
"@io_qt//:QtPrintSupport_mac",
"@io_qt//:QtWidgets_mac",
],
),
)
|
py | 1a34ddc4f54c75fc0de25f99bdae87a0a841ea7b | from typing import Union
from pyrogram.types import Message, Audio, Voice
async def convert_count(count):
    # The special string value must be checked before int() so that
    # "all" never raises a ValueError.
    if str(count) == "all":
        return "all"
    ordinals = {
        1: "First", 2: "Second", 3: "Third", 4: "Fourth", 5: "Fifth",
        6: "Sixth", 7: "Seventh", 8: "Eighth", 9: "Ninth", 10: "Tenth",
        11: "Eleventh", 12: "Twelfth", 13: "Thirteenth", 14: "Fourteenth",
        15: "Fifteenth",
    }
    return ordinals[int(count)]
def get_url(message_1: Message) -> Union[str, None]:
messages = [message_1]
if message_1.reply_to_message:
messages.append(message_1.reply_to_message)
text = ""
offset = None
length = None
for message in messages:
if offset:
break
if message.entities:
for entity in message.entities:
if entity.type == "url":
text = message.text or message.caption
offset, length = entity.offset, entity.length
break
    if offset is None:
return None
return text[offset:offset + length]
random_assistant = ["5", "1", "2", "3", "4"]
themes = ["LightBlue"]
def bytes(size: float) -> str:
"""humanize size"""
if not size:
return ""
power = 1024
t_n = 0
power_dict = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
t_n += 1
return "{:.2f} {}B".format(size, power_dict[t_n])
async def ass_det(assistant: int):
print(" 𝓡𝓲𝓭𝓱𝓪𝓶 𝓶𝓾𝓼𝓲𝓬𝓬")
|
py | 1a34de34e9eed42f596e59901dac7d4a55c0eb42 | from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import os
import numpy as np
import tensorflow as tf
from modules.evaluations import get_val_data, perform_val
from modules.models import ArcFaceModel
from modules.utils import set_memory_growth, load_yaml, l2_norm
flags.DEFINE_string('cfg_path', './configs/arc_res50.yaml', 'config file path')
flags.DEFINE_string('gpu', '0', 'which gpu to use')
flags.DEFINE_string('img_path', '', 'path to input image')
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # CUDA_VISIBLE_DEVICES must be a string; use the --gpu flag defined above
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
logger = tf.get_logger()
logger.disabled = True
logger.setLevel(logging.FATAL)
cfg = load_yaml(FLAGS.cfg_path)
gpus = tf.config.list_logical_devices('GPU')
strategy = tf.distribute.MirroredStrategy(gpus)
with strategy.scope():
model = ArcFaceModel(size=cfg['input_size'],
backbone_type=cfg['backbone_type'],
training=False)
model.load_weights(cfg['pre_trained_model'])
print(model.summary())
print("[*] Loading LFW, AgeDB30 and CFP-FP...")
lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame = \
get_val_data(cfg['test_dataset'])
print("[*] Perform Evaluation on LFW...")
acc_lfw, best_th = perform_val(
cfg['embd_shape'], cfg['batch_size'], model, lfw, lfw_issame,
is_ccrop=cfg['is_ccrop'])
print(" acc {:.4f}, th: {:.2f}".format(acc_lfw, best_th))
print("[*] Perform Evaluation on AgeDB30...")
acc_agedb30, best_th = perform_val(
cfg['embd_shape'], cfg['batch_size'], model, agedb_30,
agedb_30_issame, is_ccrop=cfg['is_ccrop'])
print(" acc {:.4f}, th: {:.2f}".format(acc_agedb30, best_th))
print("[*] Perform Evaluation on CFP-FP...")
acc_cfp_fp, best_th = perform_val(
cfg['embd_shape'], cfg['batch_size'], model, cfp_fp, cfp_fp_issame,
is_ccrop=cfg['is_ccrop'])
print(" acc {:.4f}, th: {:.2f}".format(acc_cfp_fp, best_th))
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
py | 1a34df168724abab1833ca08ea74a06209bf3087 | '''Application config'''
# Configurable parameters.
DEBUG = True
API_HOST = "localhost:1055"
# Do not change
API_URL = "http://" + API_HOST + "/api"
|
py | 1a34dfdd5552a04a8e66478a97cd7b94b70bb5dd | # This file is execfile()d with the current directory set to its containing dir.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import shutil
# -- Path setup --------------------------------------------------------------
__location__ = os.path.dirname(__file__)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, "../src"))
# -- Run sphinx-apidoc -------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/readthedocs/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/wai_data_tools")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
cmd_line = f"sphinx-apidoc --implicit-namespaces -f -o {output_dir} {module_dir}"
args = cmd_line.split(" ")
if tuple(sphinx.__version__.split(".")) >= ("1", "7"):
# This is a rudimentary parse_version to avoid external dependencies
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "wai_data_tools"
copyright = "2022, David Andersson"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# version: The short X.Y version.
# release: The full version, including alpha/beta/rc tags.
# If you don’t need the separation provided between version and release,
# just set them both to the same value.
try:
from wai_data_tools import __version__ as version
except ImportError:
version = ""
if not version or version.lower() == "unknown":
version = os.getenv("READTHEDOCS_VERSION", "unknown") # automatically set by RTD
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"sidebar_width": "300px",
"page_width": "1200px"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "wai_data_tools-doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
# "preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "user_guide.tex", "wai_data_tools Documentation", "David Andersson", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping --------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
"python": ("https://docs.python.org/" + python_version, None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://numpy.org/doc/stable", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"setuptools": ("https://setuptools.readthedocs.io/en/stable/", None),
"pyscaffold": ("https://pyscaffold.org/en/stable", None),
}
print(f"loading configurations for {project} {version} ...", file=sys.stderr)
|
py | 1a34e0cf7439f2b8bd8f21ea064cbebc6445203c | import json
import urllib
import aiohttp
from aiocache import cached
client_session = None
@cached(ttl=3600)
async def get_ios_cfw():
"""Gets all apps on ios.cfw.guide
Returns
-------
dict
"ios, jailbreaks, devices"
"""
async with client_session.get("https://api.appledb.dev/main.json") as resp:
if resp.status == 200:
data = await resp.json()
return data
@cached(ttl=3600)
async def get_ipsw_firmware_info(version: str):
"""Gets all apps on ios.cfw.guide
Returns
-------
dict
"ios, jailbreaks, devices"
"""
async with client_session.get(f"https://api.ipsw.me/v4/ipsw/{version}") as resp:
if resp.status == 200:
data = await resp.json()
return data
return []
@cached(ttl=600)
async def get_dstatus_components():
async with client_session.get("https://discordstatus.com/api/v2/components.json") as resp:
if resp.status == 200:
components = await resp.json()
return components
@cached(ttl=600)
async def get_dstatus_incidents():
async with client_session.get("https://discordstatus.com/api/v2/incidents.json") as resp:
if resp.status == 200:
incidents = await resp.json()
return incidents
async def canister_search_package(query):
"""Search for a tweak in Canister's catalogue
Parameters
----------
query : str
"Query to search for"
Returns
-------
list
"List of packages that Canister found matching the query"
"""
async with client_session.get(f'https://api.canister.me/v1/community/packages/search?query={urllib.parse.quote(query)}&searchFields=identifier,name&responseFields=identifier,header,tintColor,name,price,description,packageIcon,repository.uri,repository.name,author,maintainer,latestVersion,nativeDepiction,depiction') as resp:
if resp.status == 200:
response = json.loads(await resp.text())
if response.get('status') == "Successful":
return response.get('data')
else:
return None
else:
return None
async def canister_search_repo(query):
"""Search for a repo in Canister's catalogue
Parameters
----------
query : str
"Query to search for"
Returns
-------
list
"List of repos that Canister found matching the query"
"""
async with client_session.get(f'https://api.canister.me/v1/community/repositories/search?query={urllib.parse.quote(query)}') as resp:
if resp.status == 200:
response = json.loads(await resp.text())
if response.get('status') == "Successful":
return response.get('data')
else:
return None
else:
return None
@cached(ttl=3600)
async def canister_fetch_repos():
async with client_session.get('https://api.canister.me/v1/community/repositories/search?ranking=1,2,3,4,5') as resp:
if resp.status == 200:
response = await resp.json(content_type=None)
return response.get("data")
return None
@cached(ttl=3600)
async def fetch_scam_urls():
async with client_session.get("https://raw.githubusercontent.com/SlimShadyIAm/Anti-Scam-Json-List/main/antiscam.json") as resp:
if resp.status == 200:
obj = json.loads(await resp.text())
return obj
async def init_client_session():
global client_session
client_session = aiohttp.ClientSession()
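# Usage sketch (illustrative, not part of the original module): the aiohttp
# session must be created inside a running event loop before any helper above
# is awaited.
#
#     import asyncio
#
#     async def demo():
#         await init_client_session()
#         repos = await canister_fetch_repos()
#         print(0 if repos is None else len(repos))
#         await client_session.close()
#
#     asyncio.run(demo())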
|
py | 1a34e2166ecebd9ef33193d37725f25cc3d3a86b | from rest_framework import filters, status, viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from profiles_api import models, permissions, serializers
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
api_view = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your application logic',
'Is mapped manually to URL\'s',
]
return Response({'message': 'Hello!', 'api_view': api_view})
def post(self, request):
"""Creates a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
def put(self, request, pk=None):
"""Handles updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handles a partial update of an object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Deletes an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Returns a hello message"""
view_set = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URL\'s using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'view_set': view_set})
def create(self, request):
"""Creates a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
"""Handles getting an object by it's ID"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handles updating an object"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handles partially updating an object"""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handles removing an object"""
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handles creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
"""Handles creating user authentication tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
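# Wiring sketch (an assumption about the project's urls.py, shown here only
# for context):
#
#     from django.urls import include, path
#     from rest_framework.routers import DefaultRouter
#     from profiles_api import views
#
#     router = DefaultRouter()
#     router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
#     router.register('profile', views.UserProfileViewSet)
#     router.register('feed', views.UserProfileFeedViewSet)
#
#     urlpatterns = [
#         path('hello-view/', views.HelloApiView.as_view()),
#         path('login/', views.UserLoginApiView.as_view()),
#         path('', include(router.urls)),
#     ]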
|
py | 1a34e386a228d6e08ad91f2065f14420bfcdf2e1 | import uuid
from django.db import models
from core.models.base import StandardModel
from core.models.base import CaseInsensitiveNamedModel
from core.models import Material
from core.models import Source
from core.models import SampleType
from core.models import Storage
from core.models import Project
from core.models import Note
from django.contrib.auth.models import User
from django.contrib.contenttypes import fields
from django.core.urlresolvers import reverse
from django.db import connection
from core import constants
from core import utils
from polymorphic.models import PolymorphicModel
import logging
logger = logging.getLogger(__name__)
class Sample(PolymorphicModel, CaseInsensitiveNamedModel):
STATUSES = utils.self_zip(constants.STANDARD_STATUSES)
sample_type = models.ForeignKey(SampleType)
material = models.ForeignKey(Material)
status = models.CharField(max_length=255,choices=STATUSES,default=constants.STATUS_ACTIVE)
owner = models.ForeignKey(User,null=True,blank=True)
source = models.ForeignKey(Source,null=True,blank=True)
lot = models.CharField(max_length=255, null=True, blank=True)
volume = models.CharField(max_length=255, null=True, blank=True)
concentration = models.CharField(max_length=255, null=True, blank=True)
concentration_units = models.CharField(max_length=255, null=True, blank=True)
project = models.ManyToManyField(Project,blank=True)
storage = models.ForeignKey(Storage,null=True, blank=True)
unit_count = models.CharField(max_length=255, null=True, blank=True)
notes = fields.GenericRelation(Note)
sample_links = models.ManyToManyField(
'self',
through='SampleToSample',
symmetrical=False,
related_name="linked_to",
blank=True
)
def _has_alert_note(self):
logger.debug('looking for alert note')
return self.notes.filter(note_type=constants.TYPE_ALERT).exists()
has_alert_note = property(_has_alert_note)
class Meta:
app_label = "core"
db_table = 'sample'
verbose_name_plural = 'samples'
unique_together = ("name",)
ordering = ['-date_created']
def save(self, *args, **kwargs):
if not self.name:
self.name = Sample.name_generator()
super(Sample, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('samples-detail', kwargs={'pk': self.pk})
def __str__(self):
return self.name
def add_sample_link(self, sample, link_type):
link, created = SampleToSample.objects.get_or_create(
source_sample=self,
target_sample=sample,
type=link_type
)
return link
def remove_sample_link(self, sample, link_type):
SampleToSample.objects.filter(
source_sample=self,
target_sample=sample,
type=link_type
).delete()
return
def get_sample_links(self, link_type):
return self.sample_links.filter(
target_samples__type=link_type,
target_samples__source_sample=self
)
def get_related_to(self, link_type):
return self.linked_to.filter(
source_samples__type=link_type,
source_samples__target_sample=self
)
def get_children(self):
logger.debug("in generic get children")
link_type = SampleLink.objects.get(name=constants.LINK_TYPE_CHILD)
return self.get_sample_links(link_type)
def get_parents(self):
logger.debug("in generic get parents")
link_type = SampleLink.objects.get(name=constants.LINK_TYPE_PARENT)
return self.get_related_to(link_type)
@classmethod
def name_generator(cls):
return "S-{0}".format(uuid.uuid4())
# get the next value in the sequence based on the record name
# record_1 would generate 2
# record_10 would generate 11
@staticmethod
def get_operational_index(value):
sql_string = """
select max(
to_number(
substring(name from char_length(%(value)s) + position(%(value)s in name)),
'999'
) + 1
) from sample
where name ~ (%(value)s || '[0-9]+$');
"""
index = 1
try:
cursor = connection.cursor()
cursor.execute(sql_string, {'value': value})
row = cursor.fetchone()
logger.debug(row)
index = row[0]
if index is None:
index = 1
except Exception as e:
logger.debug(e)
logger.debug("exception while looking up values")
index = 1
logger.debug("returning the following index {0}".format(index))
return index
Sample._meta.get_field('name').null = True
Sample._meta.get_field('name').blank = True
class SampleLink(StandardModel):
class Meta:
app_label = "core"
db_table = 'sample_link'
verbose_name_plural = 'sample links'
def __str__(self):
return self.name
class SampleToSample(models.Model):
source_sample = models.ForeignKey(Sample, related_name='source_samples')
target_sample = models.ForeignKey(Sample, related_name='target_samples')
type = models.ForeignKey(SampleLink)
class Meta:
app_label = "core"
db_table = 'sample_to_sample'
verbose_name_plural = 'sample to samples'
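# Usage sketch for the linking helpers above (illustrative; the objects are
# placeholders, not fixtures from this project):
#
#     child_link_type = SampleLink.objects.get(name=constants.LINK_TYPE_CHILD)
#     parent_sample.add_sample_link(child_sample, child_link_type)
#     parent_sample.get_children()  # queryset of samples linked as children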
|
py | 1a34e4600fa7dee5ed063b72012810b978467181 | # improvement_meta.py
# author: Ahmed Bin Zaman
# since: 02/2021
"""Module for improving the fitness of a conformation.
This module provides functionalities like local search to improve the
current fitness of a given conformation. The bookkeeping is for
metamorphic proteins with four native structures.
Available Classes:
- Improvement: Encapsulates the operations to improve fitness of a
conformation.
"""
import pyrosetta as pr
import math
class Improvement:
"""Encapsulates the operations to improve fitness of a conformation.
Provides functionalities like local search to improve the current
fitness of a given conformation.
Public Attributes:
    - native_pose1: Contains the first native pose provided in the
        constructor (pyrosetta Pose object).
    - native_pose2: Contains the second native pose provided in the
        constructor (pyrosetta Pose object).
    - native_pose3: Contains the third native pose provided in the
        constructor (pyrosetta Pose object).
    - native_pose4: Contains the fourth native pose provided in the
        constructor (pyrosetta Pose object).
- total_energy_evals: Total number of energy evaluations done in
all the operations performed (integer).
- last_op_energy_evals: Number of energy evaluations done in the
last operation performed (integer).
    - min_ca_rmsdN (N in 1-4): Minimum Ca-RMSD value to native structure N
        among all the conformations generated in all the operations
        (float).
    - last_op_min_ca_rmsdN (N in 1-4): Minimum Ca-RMSD value to native
        structure N among all the conformations generated in the last
        operation performed (float).
    - min_ca_rmsd_poseN (N in 1-4): Conformation with minimum Ca-RMSD value
        to native structure N among all the conformations generated in all
        the operations (pyrosetta Pose object).
    - last_op_min_ca_rmsd_poseN (N in 1-4): Conformation with minimum
        Ca-RMSD value to native structure N among all the conformations
        generated in the last operation performed (pyrosetta Pose object).
Available methods:
- local_search: Performs greedy local search to improve fitness of a
conformation.
"""
def __init__(self, native_pose1, native_pose2, native_pose3, native_pose4):
"""Constructor
        Args:
            native_pose1, native_pose2, native_pose3, native_pose4: pyrosetta
                Pose objects containing the four native conformations. These
                are used for minimum Ca-RMSD calculation. If you don't need
                this calculation, or don't have a native conformation, just
                provide a random Pose object in its place.
"""
self.native_pose1 = pr.Pose()
self.native_pose1.assign(native_pose1)
self.native_pose2 = pr.Pose()
self.native_pose2.assign(native_pose2)
self.native_pose3 = pr.Pose()
self.native_pose3.assign(native_pose3)
self.native_pose4 = pr.Pose()
self.native_pose4.assign(native_pose4)
self.total_energy_evals = 0
self.last_op_energy_evals = 0
self.min_ca_rmsd1 = math.inf
self.last_op_min_ca_rmsd1 = math.inf
self.min_ca_rmsd_pose1 = pr.Pose()
self.last_op_min_ca_rmsd_pose1 = pr.Pose()
self.min_ca_rmsd2 = math.inf
self.last_op_min_ca_rmsd2 = math.inf
self.min_ca_rmsd_pose2 = pr.Pose()
self.last_op_min_ca_rmsd_pose2 = pr.Pose()
self.min_ca_rmsd3 = math.inf
self.last_op_min_ca_rmsd3 = math.inf
self.min_ca_rmsd_pose3 = pr.Pose()
self.last_op_min_ca_rmsd_pose3 = pr.Pose()
self.min_ca_rmsd4 = math.inf
self.last_op_min_ca_rmsd4 = math.inf
self.min_ca_rmsd_pose4 = pr.Pose()
self.last_op_min_ca_rmsd_pose4 = pr.Pose()
def local_search(self, pose, mover, score_function, successive_failures):
"""Performs greedy local search to improve fitness of a
conformation.
This local search performs specific moves to map a conformation
to a nearby local minimum in the energy surface. The search is
terminated when a specific number of moves fail to improve the
score based on a specific fitness function.
Args:
pose: A pyrosetta Pose object containing initial
conformation.
mover: A pyrosetta Mover object derermining the moves in
local search.
score_function: A pyrosetta ScoreFunction object for scoring
each move.
successive_failures: An int indicating the threshold for
consecutive number of failed moves in each trajectory.
Returns:
A pyrosetta Pose object containing the conformation with
locally minimum fitness.
"""
local_minima = pr.Pose()
local_minima.assign(pose)
new_pose = pr.Pose()
new_pose.assign(pose)
self.last_op_min_ca_rmsd1 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose1, new_pose
)
self.last_op_min_ca_rmsd2 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose2, new_pose
)
self.last_op_min_ca_rmsd3 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose3, new_pose
)
self.last_op_min_ca_rmsd4 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose4, new_pose
)
local_minima_score = score_function(local_minima)
self.last_op_energy_evals = 1
failed = 0
# Perform greedy local search
while failed < successive_failures:
mover.apply(new_pose)
pose_ca_rmsd1 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose1, new_pose
)
pose_ca_rmsd2 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose2, new_pose
)
pose_ca_rmsd3 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose3, new_pose
)
pose_ca_rmsd4 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose4, new_pose
)
if pose_ca_rmsd1 < self.last_op_min_ca_rmsd1:
self.last_op_min_ca_rmsd1 = pose_ca_rmsd1
self.last_op_min_ca_rmsd_pose1.assign(new_pose)
if pose_ca_rmsd2 < self.last_op_min_ca_rmsd2:
self.last_op_min_ca_rmsd2 = pose_ca_rmsd2
self.last_op_min_ca_rmsd_pose2.assign(new_pose)
if pose_ca_rmsd3 < self.last_op_min_ca_rmsd3:
self.last_op_min_ca_rmsd3 = pose_ca_rmsd3
self.last_op_min_ca_rmsd_pose3.assign(new_pose)
if pose_ca_rmsd4 < self.last_op_min_ca_rmsd4:
self.last_op_min_ca_rmsd4 = pose_ca_rmsd4
self.last_op_min_ca_rmsd_pose4.assign(new_pose)
current_score = score_function(new_pose)
self.last_op_energy_evals += 1
if current_score < local_minima_score:
local_minima.assign(new_pose)
local_minima_score = current_score
failed = 0
else:
failed += 1
# Bookkeeping
self.total_energy_evals += self.last_op_energy_evals
if self.last_op_min_ca_rmsd1 < self.min_ca_rmsd1:
self.min_ca_rmsd1 = self.last_op_min_ca_rmsd1
self.min_ca_rmsd_pose1.assign(self.last_op_min_ca_rmsd_pose1)
if self.last_op_min_ca_rmsd2 < self.min_ca_rmsd2:
self.min_ca_rmsd2 = self.last_op_min_ca_rmsd2
self.min_ca_rmsd_pose2.assign(self.last_op_min_ca_rmsd_pose2)
if self.last_op_min_ca_rmsd3 < self.min_ca_rmsd3:
self.min_ca_rmsd3 = self.last_op_min_ca_rmsd3
self.min_ca_rmsd_pose3.assign(self.last_op_min_ca_rmsd_pose3)
if self.last_op_min_ca_rmsd4 < self.min_ca_rmsd4:
self.min_ca_rmsd4 = self.last_op_min_ca_rmsd4
self.min_ca_rmsd_pose4.assign(self.last_op_min_ca_rmsd_pose4)
return local_minima
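# Usage sketch (illustrative; constructing the poses, mover and score function
# is left to the surrounding experiment code):
#
#     improver = Improvement(native1, native2, native3, native4)
#     local_minimum = improver.local_search(pose, mover, score_fxn,
#                                           successive_failures=50)
#     print(improver.last_op_energy_evals, improver.min_ca_rmsd1)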
|
py | 1a34e4f73f9b1aa1a153cd2a98714720aa85a727 | import logging
from datetime import datetime
import xml.etree.ElementTree as ET
from indra.statements import *
from indra.statements.statements import Migration
from indra.statements.context import MovementContext
from indra.util import UnicodeXMLTreeBuilder as UTB
logger = logging.getLogger(__name__)
class CWMSError(Exception):
pass
POLARITY_DICT = {'CC': {'ONT::CAUSE': 1,
'ONT::INFLUENCE': 1},
'EVENT': {'ONT::INCREASE': 1,
'ONT::MODULATE': None,
'ONT::DECREASE': -1,
'ONT::INHIBIT': -1,
'ONT::TRANSFORM': None,
'ONT::STIMULATE': 1,
'ONT::ARRIVE': None,
'ONT::DEPART': None,
'ONT::MOVE': None,
'ONT::BE': None},
'EPI': {'ONT::ASSOCIATE': None}}
class CWMSProcessor(object):
"""The CWMSProcessor currently extracts causal relationships between
terms (nouns) in EKB. In the future, this processor can be extended to
extract other types of relations, or to extract relations involving
events.
For more details on the TRIPS EKB XML format, see
http://trips.ihmc.us/parser/cgi/drum
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) in XML format as a string.
Attributes
----------
tree : xml.etree.ElementTree.Element
An ElementTree object representation of the TRIPS EKB XML.
doc_id: str
Document ID
statements : list[indra.statements.Statement]
A list of INDRA Statements that were extracted from the EKB.
sentences : dict[str: str]
The list of all sentences in the EKB with their IDs
paragraphs : dict[str: str]
The list of all paragraphs in the EKB with their IDs
par_to_sec : dict[str: str]
A map from paragraph IDs to their associated section types
"""
def __init__(self, xml_string):
self.statements = []
# Parse XML
try:
self.tree = ET.XML(xml_string, parser=UTB())
except ET.ParseError:
logger.error('Could not parse XML string')
self.tree = None
return
# Get the document ID from the EKB tag.
self.doc_id = self.tree.attrib.get('id')
# Store all paragraphs and store all sentences in a data structure
paragraph_tags = self.tree.findall('input/paragraphs/paragraph')
sentence_tags = self.tree.findall('input/sentences/sentence')
self.paragraphs = {p.attrib['id']: p.text for p in paragraph_tags}
self.sentences = {s.attrib['id']: s.text for s in sentence_tags}
self.par_to_sec = {p.attrib['id']: p.attrib.get('sec-type')
for p in paragraph_tags}
# Keep a list of events that are part of relations and events
# subsumed by other events
self.relation_events = set()
self.subsumed_events = set()
# Keep a list of unhandled events for development purposes
self._unhandled_events = set()
self._preprocess_events()
def _preprocess_events(self):
events = self.tree.findall("EVENT/[type]")
for event in events:
affected = event.find("*[@role=':AFFECTED']")
if affected is not None:
affected_id = affected.attrib.get('id')
if affected_id:
self.subsumed_events.add(affected_id)
def extract_causal_relations(self):
"""Extract Influence Statements from the EKB."""
relations = self.tree.findall("CC/[type]")
for relation in relations:
st = self.influence_from_relation(relation)
if st:
self.statements.append(st)
events = self.tree.findall("EVENT/[type]")
for event in events:
st = self.influence_from_event(event)
if st:
self.statements.append(st)
# In some EKBs we get two redundant relations over the same arguments,
# we eliminate these
self._remove_multi_extraction_artifacts()
# Print unhandled event types
logger.debug('Unhandled event types: %s' %
(', '.join(sorted(self._unhandled_events))))
def extract_events(self):
"""Extract standalone Events from the EKB."""
events = [(1, self.tree.findall("EVENT/[type='ONT::INCREASE']")),
(-1, self.tree.findall("EVENT/[type='ONT::DECREASE']"))]
for polarity, event_list in events:
for event_term in event_list:
event_id = event_term.attrib.get('id')
if event_id in self.subsumed_events or \
event_id in self.relation_events:
continue
event = self.event_from_event(event_term)
if event:
# Here we set the polarity based on the polarity implied by
# the increase/decrease here
event.delta.set_polarity(polarity)
self.statements.append(event)
self._remove_multi_extraction_artifacts()
def extract_migrations(self, include_relation_arg=False):
ev_types = ['ONT::MOVE', 'ONT::DEPART', 'ONT::ARRIVE']
events = []
for et in ev_types:
evs = self.tree.findall("EVENT/[type='%s']" % et)
events += evs
for event_term in events:
event_id = event_term.attrib.get('id')
if event_id in self.subsumed_events or \
(not include_relation_arg and
event_id in self.relation_events):
continue
event = self.migration_from_event(event_term)
if event is not None:
self.statements.append(event)
self._remove_multi_extraction_artifacts()
def extract_correlations(self):
correlations = self.tree.findall("EPI/[type='ONT::ASSOCIATE']")
for cor in correlations:
st = self._association_from_element(cor, 'EPI', 'NEUTRAL1',
'NEUTRAL2', False)
if st:
self.statements.append(st)
# self._remove_multi_extraction_artifacts()
def _influence_from_element(self, element, element_type, subj_arg,
obj_arg, is_arg):
components = self._statement_components_from_element(
element, element_type, subj_arg, obj_arg, is_arg)
if components is None:
return None
subj, obj, evidence, rel_type = components
# If the object polarity is not given explicitly, we set it
# based on the one implied by the relation
if obj.delta.polarity is None:
obj.delta.set_polarity(POLARITY_DICT[element_type][rel_type])
st = Influence(subj, obj, evidence=[evidence])
return st
def influence_from_relation(self, relation):
"""Return an Influence from a CC element in the EKB."""
return self._influence_from_element(relation, 'CC', 'FACTOR',
'OUTCOME', True)
def influence_from_event(self, event):
"""Return an Influence from an EVENT element in the EKB."""
return self._influence_from_element(event, 'EVENT', 'AGENT',
'AFFECTED', False)
def _statement_components_from_element(self, element, element_type,
member1_arg, member2_arg, is_arg):
element_id = element.attrib.get('id')
rel_type = element.find('type').text
if rel_type not in POLARITY_DICT[element_type]:
self._unhandled_events.add(rel_type)
return None
member1_id, member1_term = self._get_term_by_role(
element, member1_arg, is_arg)
member2_id, member2_term = self._get_term_by_role(
element, member2_arg, is_arg)
if member1_term is None or member2_term is None:
return None
member1 = self.get_event_or_migration(member1_term)
member2 = self.get_event_or_migration(member2_term)
if member1 is None or member2 is None:
return None
self.relation_events |= {member1_id, member2_id, element_id}
evidence = self._get_evidence(element)
return member1, member2, evidence, rel_type
def _association_from_element(self, element, element_type, member1_arg,
member2_arg, is_arg):
components = self._statement_components_from_element(
element, element_type, member1_arg, member2_arg, is_arg)
if components is None:
return None
member1, member2, evidence, _ = components
st = Association([member1, member2], evidence=[evidence])
return st
def event_from_event(self, event_term):
"""Return an Event from an EVENT element in the EKB."""
arg_id, arg_term = self._get_term_by_role(event_term, 'AFFECTED',
False)
if arg_term is None:
return None
# Make an Event statement if it is a standalone event
evidence = self._get_evidence(event_term)
event = self._get_event(arg_term, evidence=[evidence])
if event is None:
return None
event.context = self.get_context(event_term)
return event
def migration_from_event(self, event_term):
"""Return a Migration event from an EVENT element in the EKB."""
# First process at event level
migration_grounding = ('wm/concept/causal_factor/'
'social_and_political/migration')
concept_name = 'migration'
concept_db_refs = {'WM': migration_grounding}
# Get the element's text and use it to construct a Concept
element_text_element = event_term.find('text')
if element_text_element is not None:
element_text = element_text_element.text
concept_db_refs['TEXT'] = element_text
concept_name = sanitize_name(element_text)
concept = Concept(concept_name, db_refs=concept_db_refs)
evidence = self._get_evidence(event_term)
time = self._extract_time(event_term)
# Locations can be at different levels, keep expanding the list
locs = self._get_migration_locations(event_term)
neutral_id, neutral_term = self._get_term_by_role(event_term,
'NEUTRAL',
is_arg=False)
if neutral_term is not None:
locs = self._get_migration_locations(neutral_term, locs, 'origin')
# Arguments can be under AGENT or AFFECTED
agent_arg_id, agent_arg_term = self._get_term_by_role(
event_term, 'AGENT', False)
affected_arg_id, affected_arg_term = self._get_term_by_role(
event_term, 'AFFECTED', False)
if agent_arg_term is None and affected_arg_term is None:
context = MovementContext(locations=locs, time=time)
event = Migration(concept, context=context, evidence=[evidence])
return event
# If there are argument terms, extract more data from them
# Try to get the quantitative state associated with the event
size = None
for arg_term in [agent_arg_term, affected_arg_term]:
if arg_term is not None:
size_arg = arg_term.find('size')
if size_arg is not None and size_arg.attrib.get('id'):
size = self._get_size(size_arg.attrib['id'])
break
# Get more locations from arguments and inevents
if agent_arg_term is not None:
locs = self._get_migration_locations(
agent_arg_term, locs, 'destination')
inevent_term = self._get_inevent_term(agent_arg_term)
if inevent_term is not None:
locs = self._get_migration_locations(inevent_term, locs)
if time is None:
time = self._extract_time(inevent_term)
if size is None:
size = self._get_size_and_entity(inevent_term)
other_event_term = self._get_other_event_term(agent_arg_term)
if other_event_term is not None:
locs = self._get_migration_locations(other_event_term, locs)
if time is None:
time = self._extract_time(other_event_term)
if size is None:
size = self._get_size_and_entity(other_event_term)
if affected_arg_term is not None:
locs = self._get_migration_locations(
affected_arg_term, locs, 'destination')
context = MovementContext(locations=locs, time=time)
event = Migration(
concept, delta=size, context=context, evidence=[evidence])
return event
def _get_inevent_term(self, arg_term):
refset_arg = arg_term.find('refset')
if refset_arg is None:
return None
refset_id = refset_arg.attrib['id']
refset_term = self.tree.find("*[@id='%s']" % refset_id)
if refset_term is None:
return None
features = refset_term.find('features')
if features is None:
return None
inevent = features.find('inevent')
if inevent is None:
return None
inevent_id = inevent.attrib['id']
self.subsumed_events.add(inevent_id)
inevent_term = self.tree.find("*[@id='%s']" % inevent_id)
return inevent_term
def _get_other_event_term(self, arg_term):
refset_arg = arg_term.find('refset')
potential_events = self.tree.findall("EVENT/[type].//arg1/..") + \
self.tree.findall("EVENT/[type].//arg2/..")
for ev in potential_events:
arg1 = ev.find('arg1')
arg2 = ev.find('arg2')
for arg in [arg1, arg2]:
if arg is not None:
if refset_arg is not None:
if arg.attrib.get('id') == refset_arg.attrib.get('id'):
event_id = ev.attrib['id']
self.subsumed_events.add(event_id)
event_term = self.tree.find("*[@id='%s']"
% event_id)
return event_term
else:
# Refset might be on a different level
if arg.attrib.get('id'):
term = self.tree.find("*[@id='%s']" % arg.attrib['id'])
arg_refset_arg = term.find('refset')
if arg_refset_arg is not None:
if arg_refset_arg.attrib.get('id') == \
arg_term.attrib.get('id'):
event_id = ev.attrib['id']
self.subsumed_events.add(event_id)
event_term = self.tree.find("*[@id='%s']"
% event_id)
return event_term
return None
def _get_arg_event_term(self, term):
potential_args = term.findall('arg1') + term.findall('arg2')
for arg in potential_args:
if arg.attrib.get('id'):
new_term = self.tree.find("*[@id='%s']" % arg.attrib['id'])
if new_term is not None:
self.subsumed_events.add(new_term.attrib['id'])
return new_term
def _get_migration_locations(self, event_term, existing_locs=None,
default_role='unknown'):
if existing_locs is None:
existing_locs = []
new_locs = []
loc = self._extract_geoloc(event_term, arg_link='location')
if loc is not None:
new_locs.append({'location': loc,
'role': default_role})
loc = self._extract_geoloc(event_term, arg_link='to-location')
if loc is not None:
new_locs.append({'location': loc,
'role': 'destination'})
loc = self._extract_geoloc(event_term, arg_link='from-location')
if loc is not None:
new_locs.append({'location': loc,
'role': 'origin'})
for loc in new_locs:
if loc not in existing_locs:
existing_locs.append(loc)
return existing_locs
def _get_size(self, size_term_id):
size_term = self.tree.find("*[@id='%s']" % size_term_id)
value = size_term.find('value')
if value is None:
value = size_term.find('amount')
if value is not None:
mod = value.attrib.get('mod')
if mod and mod.lower() == 'almost':
mod = 'less_than'
value_txt = value.text
if value_txt is not None:
value_str = value.text.strip()
if value_str and not value_str.startswith('ONT') and \
not value_str.startswith('W'):
value = int(float(value_str))
else:
value = None
else:
value = None
unit = size_term.find('unit')
if unit is not None:
unit = unit.text.strip().lower()
else:
unit = 'absolute'
text = size_term.find('text').text
size = QuantitativeState(entity='person', value=value, unit=unit,
modifier=mod, text=text)
else:
size = None
return size
def _get_size_and_entity(self, event_term):
# For cases when entity (group) information and quantity are stored in
# different arguments and we can overwrite default 'person' entity
_, term1 = self._get_term_by_role(event_term, 'NEUTRAL', False)
_, term2 = self._get_term_by_role(event_term, 'NEUTRAL1', False)
size = None
if term1 is not None:
size_arg = term1.find('size')
if size_arg is not None and size_arg.attrib.get('id'):
size = self._get_size(size_arg.attrib['id'])
if size is not None and term2 is not None:
size.entity = term2.find('text').text
return size
def _get_term_by_role(self, term, role, is_arg):
"""Return the ID and the element corresponding to a role in a term."""
element = term.find("%s[@role=':%s']" % ('arg/' if is_arg else '*',
role))
if element is None:
return None, None
element_id = element.attrib.get('id')
if element_id is None:
return None, None
element_term = self.tree.find("*[@id='%s']" % element_id)
if element_term is None:
return None, None
return element_id, element_term
def _get_event(self, event_term, evidence=None):
"""Extract and Event from the given EKB element."""
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with_text(event_term)
# Get the element's text and use it to construct a Concept
element_text_element = event_term.find('text')
if element_text_element is None:
return None
element_text = element_text_element.text
if element_text is None:
return None
element_db_refs = {'TEXT': element_text.rstrip()}
element_name = sanitize_name(element_text.rstrip())
element_type_element = event_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
concept = Concept(element_name, db_refs=element_db_refs)
ev_type = event_term.find('type').text
polarity = POLARITY_DICT['EVENT'].get(ev_type)
delta = QualitativeDelta(polarity=polarity)
context = self.get_context(event_term)
event_obj = Event(concept, delta=delta, context=context,
evidence=evidence)
return event_obj
def _get_wm_grounding(self, element):
wm_gr = None
wm_type_element = element.find('wm-type')
if wm_type_element is not None:
grounding_element = wm_type_element.find('grounding')
if grounding_element is not None:
wm_gr = (grounding_element.text, 0.7)
return wm_gr
def _add_start_end(self, term, starts, ends):
start = term.attrib.get('start')
end = term.attrib.get('end')
if start:
starts.append(int(start))
if end:
ends.append(int(end))
return starts, ends
def get_event_or_migration(self, event_term):
#if event_term.find('type').text in [
# 'ONT::MOVE', 'ONT::DEPART', 'ONT::ARRIVE']:
# return self.migration_from_event(event_term)
#else:
return self._get_event(event_term)
def get_context(self, element):
time = self._extract_time(element)
geoloc = self._extract_geoloc(element)
if time or geoloc:
context = WorldContext(time=time, geo_location=geoloc)
else:
context = None
return context
def _extract_time(self, term):
time = term.find('time')
if time is None:
time = term.find('features/time')
if time is None:
return None
time_id = time.attrib.get('id')
time_term = self.tree.find("*[@id='%s']" % time_id)
if time_term is None:
return None
text = sanitize_name(time_term.findtext('text'))
timex = time_term.find('timex')
if timex is not None:
start = self._process_timex(timex)
if start is not None:
time_context = TimeContext(text=text, start=start)
else:
time_context = TimeContext(text=text)
else:
start = None
end = None
from_time_el = time_term.find('from-time')
to_time_el = time_term.find('to-time')
if from_time_el is not None:
from_time_id = from_time_el.attrib.get('id')
from_time_term = self.tree.find("*[@id='%s']" % from_time_id)
if time_term is not None:
timex = from_time_term.find('timex')
if timex is not None:
start = self._process_timex(timex)
if to_time_el is not None:
to_time_id = to_time_el.attrib.get('id')
to_time_term = self.tree.find("*[@id='%s']" % to_time_id)
if to_time_term is not None:
timex = to_time_term.find('timex')
if timex is not None:
end = self._process_timex(timex)
if start and end:
duration = int((end - start).total_seconds())
else:
duration = None
time_context = TimeContext(
text=text, start=start, end=end, duration=duration)
return time_context
@staticmethod
def _process_timex(timex):
year = timex.findtext('year')
month = timex.findtext('month')
day = timex.findtext('day')
if year or month or day:
try:
year = int(year)
except Exception:
year = None
try:
# Month can be represented either by name, short name or
# number (October, Oct or 10)
month = int(month)
except Exception:
try:
month = datetime.strptime(month, '%B').month
except Exception:
try:
month = datetime.strptime(month, '%b').month
except Exception:
month = 1
try:
day = int(day)
except Exception:
day = 1
if year and month and day:
time = datetime(year, month, day)
return time
return None
def _extract_geoloc(self, term, arg_link='location'):
"""Get the location from a term (CC or TERM)"""
loc = term.find(arg_link)
if loc is None:
return None
loc_id = loc.attrib.get('id')
loc_term = self.tree.find("*[@id='%s']" % loc_id)
if loc_term is None:
return None
text = loc_term.findtext('text')
grounding = loc_term.find('grounding')
db_refs = {}
if grounding is not None:
places = grounding.findall('place')
for place in places:
nsid = place.attrib.get('id')
db_ns, db_id = nsid.split(':')
if db_ns == 'GNO':
db_ns = 'GEOID'
# TODO: name spaces are sometimes repeated in the EKB, here we
# silently overwrite a key if it already exists
db_refs[db_ns] = db_id
# name = loc_term.findtext('name')
geoloc_context = RefContext(name=text, db_refs=db_refs)
return geoloc_context
def _get_assoc_with_text(self, element_term):
# NOTE: there could be multiple assoc-withs here that we may
# want to handle
assoc_with = element_term.find('assoc-with')
if assoc_with is not None:
# We first identify the ID of the assoc-with argument
assoc_with_id = assoc_with.attrib.get('id')
# In some cases the assoc-with has no ID but has a type
# defined in place that we can get
if assoc_with_id is None:
assoc_with_grounding = assoc_with.find('type').text
return assoc_with_grounding
# If the assoc-with has an ID then find the TERM
# corresponding to it
assoc_with_term = self.tree.find("*[@id='%s']" % assoc_with_id)
if assoc_with_term is not None:
# We then get the grounding for the term
assoc_with_grounding = assoc_with_term.find('type').text
return assoc_with_grounding
return None
def _get_assoc_with_term(self, element_term):
assoc_with = element_term.find('assoc-with')
if assoc_with is not None:
assoc_with_id = assoc_with.attrib.get('id')
if assoc_with_id is not None:
assoc_with_term = self.tree.find("*[@id='%s']" % assoc_with_id)
return assoc_with_term
def _get_evidence(self, event_tag):
text = self._get_evidence_text(event_tag)
sec = self._get_section(event_tag)
epi = {'direct': False}
if sec:
epi['section_type'] = sec
ev = Evidence(source_api='cwms', text=text, pmid=self.doc_id,
epistemics=epi)
return ev
def _get_evidence_text(self, event_tag):
"""Extract the evidence for an event.
Pieces of text linked to an EVENT are fragments of a sentence. The
EVENT refers to the paragraph ID and the "uttnum", which corresponds
to a sentence ID. Here we find and return the full sentence from which
the event was taken.
"""
par_id = event_tag.attrib.get('paragraph')
uttnum = event_tag.attrib.get('uttnum')
event_text = event_tag.find('text')
if self.sentences is not None and uttnum is not None:
sentence = self.sentences[uttnum]
elif event_text is not None:
sentence = event_text.text
else:
sentence = None
return sentence
def _get_section(self, event_tag):
par_id = event_tag.attrib.get('paragraph')
sec = self.par_to_sec.get(par_id)
return sec
def _remove_multi_extraction_artifacts(self):
# Build up a dict of evidence matches keys with statement UUIDs
evmks = {}
logger.debug('Starting with %d Statements.' % len(self.statements))
for stmt in self.statements:
if isinstance(stmt, Event):
evmk = stmt.evidence[0].matches_key() + \
stmt.concept.matches_key()
elif isinstance(stmt, Influence):
evmk = (stmt.evidence[0].matches_key() +
stmt.subj.matches_key() + stmt.obj.matches_key())
elif isinstance(stmt, Association):
evmk = (stmt.evidence[0].matches_key() +
stmt.members[0].matches_key() +
stmt.members[1].matches_key())
if evmk not in evmks:
evmks[evmk] = [stmt.uuid]
else:
evmks[evmk].append(stmt.uuid)
# This is a list of groups of statement UUIDs that are redundant
multi_evmks = [v for k, v in evmks.items() if len(v) > 1]
# We now figure out if anything needs to be removed
to_remove = []
# Remove redundant statements
for uuids in multi_evmks:
# Influence statements to be removed
infl_stmts = [s for s in self.statements if (
s.uuid in uuids and isinstance(s, Influence))]
infl_stmts = sorted(infl_stmts, key=lambda x: x.polarity_count(),
reverse=True)
to_remove += [s.uuid for s in infl_stmts[1:]]
# Association statements to be removed
assn_stmts = [s for s in self.statements if (
s.uuid in uuids and isinstance(s, Association))]
assn_stmts = sorted(assn_stmts, key=lambda x: x.polarity_count(),
reverse=True)
# Standalone events to be removed
events = [s for s in self.statements if (
s.uuid in uuids and isinstance(s, Event))]
events = sorted(events, key=lambda x: event_delta_score(x),
reverse=True)
to_remove += [e.uuid for e in events[1:]]
# Remove all redundant statements
if to_remove:
logger.debug('Found %d Statements to remove' % len(to_remove))
self.statements = [s for s in self.statements
if s.uuid not in to_remove]
class CWMSProcessorCompositional(CWMSProcessor):
def _get_event(self, event_term, evidence=None):
"""Extract and Event from the given EKB element."""
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with_text(event_term)
# We're using a union of texts from multiple terms instead
# Get the element's text and use it to construct a Concept
# element_text_element = event_term.find('text')
# if element_text_element is None:
# return None
# element_text = element_text_element.text
# element_db_refs = {'TEXT': element_text}
# element_name = sanitize_name(element_text)
element_db_refs = {}
par = event_term.attrib['paragraph']
starts, ends = self._add_start_end(event_term, [], [])
element_type_element = event_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
theme_gr, theme_prop_gr, theme_proc_gr, theme_proc_prop_gr = \
None, None, None, None
# Grounding can be provided on multiple levels
theme_gr = self._get_wm_grounding(event_term)
if not theme_gr:
arg_term = self._get_arg_event_term(event_term)
if arg_term is not None:
starts, ends = self._add_start_end(arg_term, starts, ends)
assoc_term = self._get_assoc_with_term(arg_term)
if assoc_term is not None:
starts, ends = self._add_start_end(
assoc_term, starts, ends)
new_arg_term = self._get_arg_event_term(assoc_term)
# Theme grounding is usually at the "deepest" level
if new_arg_term is not None:
starts, ends = self._add_start_end(
new_arg_term, starts, ends)
theme_gr = self._get_wm_grounding(new_arg_term)
theme_proc_gr = self._get_wm_grounding(assoc_term)
theme_proc_prop_gr = self._get_wm_grounding(arg_term)
else:
theme_gr = self._get_wm_grounding(assoc_term)
extra_gr = self._get_wm_grounding(arg_term)
# This can be process or property, look at ontology
if extra_gr:
if 'process' in extra_gr[0]:
theme_proc_gr = extra_gr
else:
theme_prop_gr = extra_gr
# Get a union of all texts
element_text = self.paragraphs[par][min(starts): max(ends)].rstrip()
element_db_refs['TEXT'] = element_text
element_name = sanitize_name(element_text)
# Promote process grounding to theme if theme is missing
if not theme_gr and theme_proc_gr:
theme_gr = theme_proc_gr
theme_proc_gr = None
# Drop process property grounding in process is missing
if not theme_proc_gr:
theme_proc_prop_gr = None
# Only add WM grounding if there's a theme grounding
if theme_gr:
element_db_refs['WM'] = [(theme_gr, theme_prop_gr, theme_proc_gr,
theme_proc_prop_gr)]
concept = Concept(element_name, db_refs=element_db_refs)
ev_type = event_term.find('type').text
polarity = POLARITY_DICT['EVENT'].get(ev_type)
delta = QualitativeDelta(polarity=polarity)
context = self.get_context(event_term)
event_obj = Event(concept, delta=delta, context=context,
evidence=evidence)
return event_obj
def sanitize_name(txt):
name = txt.replace('\n', '')
return name
def event_delta_score(stmt):
if stmt.delta is None:
return 0
pol_score = 1 if stmt.delta.polarity is not None else 0
if isinstance(stmt.delta, QualitativeDelta):
adj_score = len(stmt.delta.adjectives)
return (pol_score + adj_score)
if isinstance(stmt.delta, QuantitativeState):
value_score = 1 if stmt.delta.value is not None else 0
return (pol_score + value_score)
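# Usage sketch (illustrative; the EKB XML would normally come from the TRIPS
# CWMS reader, which is not shown in this module):
#
#     cp = CWMSProcessor(ekb_xml_string)
#     cp.extract_causal_relations()
#     cp.extract_events()
#     statements = cp.statements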
|
py | 1a34e67bbed7d4659482659d167a66b8102e6dcc | from core import models
from django.contrib.auth import get_user_model
from django.test import TestCase
from unittest.mock import patch
def sample_user(email='[email protected]', password='testpass'):
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = '[email protected]'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
    def test_new_user_email_normalized(self):
email = '[email protected]'
user = get_user_model().objects.create_user(email, 'tst21132')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
""""""
user = get_user_model().objects.create_superuser(
'[email protected]', 'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_create_tag_str(self):
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
# this set() fx is the __str__ function in the
# Tag.__str__ method in the models.py file.
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
|
py | 1a34e68c90adc5afb4ba7761ad5d3ec585653278 | import logging
import re
import typing as t
import requests
from . import const
HOST = "https://{endpoint}.api.pvp.net"
# Commonly used types (for type hints)
Params = t.Dict[str, str]
JSON = t.Dict[str, t.Any]
l = logging.getLogger(__name__)
api_key = None # type: str
###################################################################################################
def _build_url(url_base: str, region: str, **kwargs: t.Any):
if url_base.startswith("/"):
url_base = HOST + url_base
kwargs.setdefault('endpoint', region)
kwargs.setdefault('region', region)
kwargs.setdefault('platform', const.Platform[region])
return url_base.format(**kwargs)
def _get_data(url: str, params: Params = None) -> JSON:
if not params:
params = {}
params.setdefault('api_key', api_key)
l.debug("Requesting '%s' with params: %s", url, params)
r = requests.get(url, params=params)
return r.json()
def _staticdata(variant: str, params: Params = None, region="euw") -> JSON:
url = _build_url("/api/lol/static-data/{region}/v1.2/{variant}",
region=region, endpoint='global', variant=variant)
return _get_data(url, params)
def _standardize_summoner_name(summoner_name: str) -> str:
# The standardized summoner name
# is the summoner name in all lower case
# and with spaces removed.
return re.sub(r"\s", "", summoner_name.lower())
###################################################################################################
def set_key(key: str):
global api_key
api_key = key
def format_status(data: JSON) -> str:
return "Status code: {status_code}, message: {message}".format(**data['status'])
def get_champions(params: Params = None) -> JSON:
return _staticdata("champion", params)
def get_versions() -> JSON:
return _staticdata("versions")
def get_summoner_id(region: str, summoner_name: str) -> t.Optional[int]:
"""Determine ID of a summoner by name.
Returns None if summoner name is not found.
"""
standardized_name = _standardize_summoner_name(summoner_name)
url = _build_url("/api/lol/{region}/v1.4/summoner/by-name/{summoner_name}",
region=region, summoner_name=standardized_name)
result = _get_data(url)
if standardized_name not in result:
return None
else:
return result[standardized_name]['id']
def get_current_game_info(region: str, summoner_id: int) -> t.Optional[JSON]:
url = _build_url("/observer-mode/rest/consumer/getSpectatorGameInfo/{platform}/{summoner_id}",
region=region, summoner_id=summoner_id)
result = _get_data(url)
if 'status' in result:
if result['status']['status_code'] == 404: # not in-game
return None
else:
l.error("Non-standard result! %s", format_status(result))
return None
else:
return result
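# Hedged usage sketch (the package/module name, API key and summoner name below
# are placeholders, not part of this codebase):
#
#     from mypackage import riot_api   # however this module is imported
#     riot_api.set_key("RGAPI-xxxxxxxx-xxxx")
#     sid = riot_api.get_summoner_id("euw", "Some Summoner")
#     if sid is not None:
#         game = riot_api.get_current_game_info("euw", sid)
#         print(game if game else "summoner is not currently in a game")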
|
py | 1a34e80e3051ae7fed773ced6b43c54b7af3345a | #!/usr/bin/env python3
import random
import os
import asyncpg
from quart import Quart, jsonify, make_response, request, render_template
app = Quart(__name__)
GET_WORLD = "select id,randomnumber from world where id = $1"
UPDATE_WORLD = "update world set randomNumber = $2 where id = $1"
@app.before_serving
async def connect_to_db():
app.db = await asyncpg.create_pool(
user=os.getenv("PGUSER", "benchmarkdbuser"),
password=os.getenv("PGPASS", "benchmarkdbpass"),
database="hello_world",
host="tfb-database",
port=5432,
)
@app.after_serving
async def disconnect_from_db():
await app.db.close()
@app.route("/json")
async def json():
return {"message": "Hello, World!"}
@app.route("/plaintext")
async def plaintext():
response = await make_response(b"Hello, World!")
# Quart assumes string responses are 'text/html', so make a custom one
response.mimetype = "text/plain"
return response
@app.route("/db")
async def db():
async with app.db.acquire() as conn:
key = random.randint(1, 10000)
number = await conn.fetchval(GET_WORLD, key)
return jsonify({"id": key, "randomNumber": number})
def get_query_count(args):
qc = args.get("queries")
if qc is None:
return 1
try:
qc = int(qc)
except ValueError:
return 1
qc = max(qc, 1)
qc = min(qc, 500)
return qc
@app.route("/queries")
async def queries():
queries = get_query_count(request.args)
worlds = []
async with app.db.acquire() as conn:
pst = await conn.prepare(GET_WORLD)
for _ in range(queries):
key = random.randint(1, 10000)
number = await pst.fetchval(key)
worlds.append({"id": key, "randomNumber": number})
return jsonify(worlds)
@app.route("/updates")
async def updates():
queries = get_query_count(request.args)
new_worlds = []
async with app.db.acquire() as conn, conn.transaction():
pst = await conn.prepare(GET_WORLD)
for _ in range(queries):
key = random.randint(1, 10000)
old_number = await pst.fetchval(key)
new_number = random.randint(1, 10000)
new_worlds.append((key, new_number))
await conn.executemany(UPDATE_WORLD, new_worlds)
return jsonify(
[{"id": key, "randomNumber": new_number} for key, new_number in new_worlds]
)
@app.route("/fortunes")
async def fortunes():
async with app.db.acquire() as conn:
rows = await conn.fetch("select * from fortune")
rows.append((0, "Additional fortune added at request time."))
rows.sort(key=lambda row: row[1])
return await render_template("fortunes.html", fortunes=rows)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
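# Note: the app.run() call above starts Quart's built-in development server.
# For benchmarking one would normally serve the app with an ASGI server such as
# hypercorn (assuming it is installed and this module is saved as app.py):
#
#     hypercorn app:app --bind 0.0.0.0:8080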
|
py | 1a34e8e832596b5f5a2b1ac3a6ccef109b2c6164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib as ml
import matplotlib.pyplot as plt
import _settings
import os.path
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import colors, ticker, cm
from math import log10
def logspace_for(data, n=20):
"""log10-spaced bins for some data"""
data = np.asarray(data)
top = data.max()
bot = data.min()
logtop = log10(top)
logbot = log10(bot)
logspace = np.linspace(logbot, logtop, n, endpoint=True)
space = 10.0 ** logspace
space[0]=bot
space[-1]=top
return space
def contour_cum_histo(x, y,
title="",
xlabel="",
ylabel="",
n_bins=50,
n_levels=10,
x_cap_pc=100.0,
y_cap_pc=100.0,
cmap=None):
"histogram with approx equal-occupancy contour lines"
if cmap is None: cmap = plt.cm.bone_r
x_cap = np.percentile(x, x_cap_pc)
y_cap = np.percentile(y, y_cap_pc)
mask = (x<=x_cap) & (y<=y_cap)
x_capped = x[mask]
y_capped = y[mask]
H, xedges, yedges = np.histogram2d(
x_capped, y_capped,
bins=(n_bins, n_bins),
normed=True)
H_sorted = np.sort(H.flatten())
H_cum = H_sorted.cumsum()
# more precise version at https://gist.github.com/adrn/3993992
levels = H_sorted[H_cum.searchsorted(np.linspace(1.0/n_levels*H_cum[-1], H_cum[-1], n_levels, endpoint=True))]
level_labels = np.linspace(0, 100.0*(1-1.0/n_levels), n_levels, endpoint=True)
#lowest_bin = np.percentile(H[H>0].flatten(), 5.0) #Ignore bottom 5%
#levels = np.power(10,np.arange(np.ceil(np.log(lowest_bin)),np.ceil(np.log(H.max())), 0.5))
#levels = np.concatenate([[0.0], levels])
#levels = np.percentile(H.flatten(), np.linspace(0.0, 100.0, n_levels, endpoint=True))
#extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] #axes transposed for histograms
fig = plt.figure()
ax = plt.gca()
#points = plt.scatter(
# y_capped, x_capped,
# marker="x"
#)
cset = plt.contourf(H,
levels=levels,
cmap=cmap,
#origin='lower',
#colors=['black','green','blue','red'],
#locator=ticker.LogLocator(),
#linewidths=(1.9, 1.6, 1.5, 1.4),
extent=extent
)
fset = plt.contour(H,
levels=levels,
#origin='lower',
colors=['red'],
#locator=ticker.LogLocator(),
#linewidths=(1.9, 1.6, 1.5, 1.4),
extent=extent,
hold='on'
)
# Make a colorbar for the ContourSet returned by the contourf call.
#cbar = plt.colorbar(cset)
#cbar.ax.set_ylabel('verbosity coefficient')
# Add the contour line levels to the colorbar
#cbar.add_lines(fset)
#plt.clabel(cset, inline=1, fontsize=10, fmt='%1.0i')
#for c in cset.collections:
    #    c.set_linestyle('solid')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig, ax, cset, fset #, cbar
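# Hedged usage sketch for contour_cum_histo (purely synthetic data, shown only
# to illustrate the calling convention):
#
#     x = np.random.lognormal(mean=1.0, sigma=0.8, size=10000)
#     y = np.random.lognormal(mean=2.0, sigma=0.5, size=10000)
#     fig, ax, cset, fset = contour_cum_histo(
#         x, y,
#         xlabel="views", ylabel="comments",
#         n_bins=40, n_levels=8,
#         x_cap_pc=99.0, y_cap_pc=99.0)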
def counts_for(timestamps):
"small helper to create an index vector"
return np.arange(1, timestamps.size+1)
def plot_timestamps(timestamps, ax=None, **kwargs):
ax = ax if ax else plt.gca()
return plt.plot(timestamps, counts_for(timestamps), **kwargs)
def plot_point_ts_series(tseries, ax=None, **kwargs):
ax = ax if ax else plt.gca()
return plt.plot(tseries.index, tseries, **kwargs)
def plot_ts(ts_frame, ax=None, **kwargs):
ax = ax if ax else plt.gca()
return ax.plot(ts_frame.run_time, ts_frame.view_count, **kwargs)
def multisave(basename, fig=None, dpi=300, **kwargs):
basedir = getattr(_settings, 'FIGURES', None)
fig = fig if fig else plt.gcf()
if basedir:
basename = os.path.join(basedir, basename)
    # Aggressively prevent file handle leakage; PNG and PDF are written as
    # bytes, so those handles must be opened in binary mode.
    with open(basename + ".png", "wb") as h:
        fig.savefig(h, format="png", dpi=dpi)
    with open(basename + ".pdf", "wb") as h:
        fig.savefig(h, format="pdf")
    with open(basename + ".svg", "w") as h:
        fig.savefig(h, format="svg")
#return fig
def plot_ts_rates(ts_frame, ax=None,
title=None,
scale=3600*24, **kwargs):
ax = ax if ax else plt.gca()
vid = ts_frame.iloc[0,0]
if title is None:
title = "Estimated rate for {!r}".format(vid)
ax.step(
pd.to_datetime(ts_frame.run_time[1:] * scale, unit='s'),
ts_frame.rate[1:],
**kwargs)
#ax.set_xlabel('time')
ax.set_ylabel('approx. intensity (views/day)')
ax.set_title(title)
ax.figure.autofmt_xdate()
return ax
def diagnose_ts(ts_frame, **kwargs):
fig, axes = plt.subplots(nrows=1, ncols=2)
ax = axes[0]
    # x and y were previously undefined here; plot the frame's own columns instead
    ax.plot(ts_frame.run_time, ts_frame.view_count, 'r')
    ax.set_xlabel('run time')
    ax.set_ylabel('view count')
    ax.set_title('cumulative views')
fig.tight_layout()
return fig, axes
|
py | 1a34e8f04735da20b2d564329f0ca33519c082d6 | from base import PerfBaseDpdk
import time
from tcutils.wrappers import preposttest_wrapper
import test
class PerfDpdkTest(PerfBaseDpdk):
@classmethod
def setUpClass(cls):
super(PerfDpdkTest, cls).setUpClass()
# @preposttest_wrapper
# def test_perfdpdk_tcp_vm_to_vm_differ_compute(self):
# return self.run_perf_tests('THROUGHPUT','different','TCP','v4')
# @preposttest_wrapper
# def test_perfdpdk_tcp_vm_to_vm_same_compute(self):
# return self.run_perf_tests('THROUGHPUT','same','TCP','v4')
# @preposttest_wrapper
# def test_ixia_perfdpdk_tcp_vm_to_vm_compute_2_1si(self):
# return self.run_ixia_perf_tests('THROUGHPUT','different','TCP','v4',2,1)
@preposttest_wrapper
def test_ixia_perfdpdk_tcp_vm_to_vm_compute_4_1si(self):
return self.run_ixia_perf_tests('THROUGHPUT','different','TCP','v4',4,1)
@preposttest_wrapper
def test_ixia_perfdpdk_tcp_vm_to_vm_compute_8_1si(self):
return self.run_ixia_perf_tests('THROUGHPUT','different','TCP','v4',8,1)
# @preposttest_wrapper
# def test_ixia_perfdpdk_tcp_vm_to_vm_compute_2_2si(self):
# return self.run_ixia_perf_tests('THROUGHPUT','different','TCP','v4',2,2)
@preposttest_wrapper
def test_ixia_perfdpdk_tcp_vm_to_vm_compute_4_2si(self):
return self.run_ixia_perf_tests('THROUGHPUT','different','TCP','v4',4,2)
# @preposttest_wrapper
# def test_ixia_perfdpdk_tcp_vm_to_vm_compute_2_4si(self):
# return self.run_ixia_perf_tests('THROUGHPUT','different','TCP','v4',2,4)
"""
@preposttest_wrapper
def test_perfdpdk_udp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('THROUGHPUT','different','UDP','v4')
@preposttest_wrapper
def test_perfdpdk_udp_vm_to_vm_same_compute(self):
return self.run_perf_tests('THROUGHPUT','same','UDP','v4')
@preposttest_wrapper
def test_latdpdk_tcp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('LATENCY','different','TCP','v4')
@preposttest_wrapper
def test_latdpdk_udp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('LATENCY','different','UDP','v4')
@preposttest_wrapper
def test_latdpdk_tcp_vm_to_vm_same_compute(self):
return self.run_perf_tests('LATENCY','same','TCP','v4')
@preposttest_wrapper
def test_latdpdk_udp_vm_to_vm_same_compute(self):
return self.run_perf_tests('LATENCY','same','UDP','v4')
@preposttest_wrapper
def test_perf6dpdk_tcp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('THROUGHPUT','different','TCP','v6')
@preposttest_wrapper
def test_perf6dpdk_udp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('THROUGHPUT','different','UDP','v6')
@preposttest_wrapper
def test_perf6dpdk_tcp_vm_to_vm_same_compute(self):
return self.run_perf_tests('THROUGHPUT','same','TCP','v6')
@preposttest_wrapper
def test_perf6dpdk_udp_vm_to_vm_same_compute(self):
return self.run_perf_tests('THROUGHPUT','same','UDP','v6')
@preposttest_wrapper
def test_lat6dpdk_tcp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('LATENCY','different','TCP','v6')
@preposttest_wrapper
def test_lat6dpdk_udp_vm_to_vm_differ_compute(self):
return self.run_perf_tests('LATENCY','different','UDP','v6')
@preposttest_wrapper
def test_lat6dpdk_tcp_vm_to_vm_same_compute(self):
return self.run_perf_tests('LATENCY','same','TCP','v6')
@preposttest_wrapper
def test_lat6dpdk_udp_vm_to_vm_same_compute(self):
return self.run_perf_tests('LATENCY','same','UDP','v6')
"""
#end PerfDpdkTest
|
py | 1a34e9427e3cd42cb3f03d9f196fd2557c6fe963 | import pandas as pd
import numpy as np
import random
import logging
import cv2
import sys
sys.path.append("../DataProcessing/")
from ImageTransformer import ImageTransformer
class DataProcessor:
@staticmethod
def ProcessTrainData(trainPath, image_height, image_width, isGray = False, isExtended=False):
"""Reads the .pickle file and converts it into a format suitable fot training
Parameters
----------
trainPath : str
The file location of the .pickle
        image_height : int
            Height of the input frames, in pixels
        image_width : int
            Width of the input frames, in pixels
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
isExtended : bool, optional
True if the dataset contains both head and hand pose and you wish to retrieve both
Returns
-------
list
list of video frames and list of labels (poses)
"""
train_set = pd.read_pickle(trainPath).values
logging.info('[DataProcessor] train shape: ' + str(train_set.shape))
size = len(train_set[:, 0])
n_val = int(float(size) * 0.2)
#n_val = 13000
np.random.seed(100)
# split between train and test sets:
x_train = train_set[:, 0]
x_train = np.vstack(x_train[:]).astype(np.float32)
if isGray == True:
x_train = np.reshape(x_train, (-1, image_height, image_width, 1))
else:
x_train = np.reshape(x_train, (-1, image_height, image_width, 3))
x_train= np.swapaxes(x_train, 1, 3)
x_train = np.swapaxes(x_train, 2, 3)
y_train = train_set[:, 1]
y_train = np.vstack(y_train[:]).astype(np.float32)
ix_val, ix_tr = np.split(np.random.permutation(train_set.shape[0]), [n_val])
x_validation = x_train[ix_val, :]
x_train = x_train[ix_tr, :]
y_validation = y_train[ix_val, :]
y_train = y_train[ix_tr, :]
shape_ = len(x_train)
sel_idx = random.sample(range(0, shape_), k=(size-n_val))
#sel_idx = random.sample(range(0, shape_), k=50000)
x_train = x_train[sel_idx, :]
y_train = y_train[sel_idx, :]
if isExtended == True:
z_train = train_set[:, 2]
z_train = np.vstack(z_train[:]).astype(np.float32)
z_validation = z_train[ix_val, :]
z_train = z_train[ix_tr, :]
z_train = z_train[sel_idx, :]
return [x_train, x_validation, y_train, y_validation, z_train, z_validation]
return [x_train, x_validation, y_train, y_validation]
@staticmethod
def ProcessTestData(testPath, image_height, image_width, isGray = False, isExtended=False):
"""Reads the .pickle file and converts it into a format suitable fot testing
Parameters
----------
testPath : str
The file location of the .pickle
        image_height : int
            Height of the input frames, in pixels
        image_width : int
            Width of the input frames, in pixels
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
isExtended : bool, optional
True if the dataset contains both head and hand pose and you wish to retrieve both
Returns
-------
list
list of video frames and list of labels (poses)
"""
test_set = pd.read_pickle(testPath).values
logging.info('[DataProcessor] test shape: ' + str(test_set.shape))
x_test = test_set[:, 0]
x_test = np.vstack(x_test[:]).astype(np.float32)
if isGray == True:
x_test = np.reshape(x_test, (-1, image_height, image_width, 1))
else:
x_test = np.reshape(x_test, (-1, image_height, image_width, 3))
x_test = np.swapaxes(x_test, 1, 3)
x_test = np.swapaxes(x_test, 2, 3)
y_test = test_set[:, 1]
y_test = np.vstack(y_test[:]).astype(np.float32)
if isExtended ==True:
z_test = test_set[:, 2]
z_test = np.vstack(z_test[:]).astype(np.float32)
return [x_test, y_test, z_test]
return [x_test, y_test]
@staticmethod
def ExtractValidationLabels(testPath, image_height, image_width, isGray = False):
"""Reads the .pickle file and converts it into a format suitable for testing on pulp
You need to create a folder called test though
Parameters
----------
testPath : str
The file location of the .pickle
        image_height : int
            Height of the input frames, in pixels
        image_width : int
            Width of the input frames, in pixels
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
"""
test_set = pd.read_pickle(testPath).values
logging.info('[DataProcessor] test shape: ' + str(test_set.shape))
x_test = test_set[:, 0]
x_test = np.vstack(x_test[:]).astype(np.float32)
if isGray == True:
x_test = np.reshape(x_test, (-1, image_height, image_width, 1))
else:
x_test = np.reshape(x_test, (-1, image_height, image_width, 3))
x_test = np.swapaxes(x_test, 1, 3)
x_test = np.swapaxes(x_test, 2, 3)
y_test = test_set[:, 1]
y_test = np.vstack(y_test[:]).astype(np.float32)
f = open("test/labels.txt", "w")
for i in range(0, len(x_test)):
data = x_test[i]
data = np.swapaxes(data, 0, 2)
data = np.swapaxes(data, 0, 1)
data = np.reshape(data, (60, 108))
img = np.zeros((244, 324), np.uint8)
img[92:152, 108:216] = data
cv2.imwrite("test/{}.pgm".format(i), img)
label = y_test[i]
f.write("{},{},{},{}\n".format(label[0], label[1],label[2],label[3]))
f.close()
@staticmethod
def ProcessInferenceData(images, image_height, image_width, isGray=False):
"""Converts a list of images into a format suitable fot inference
Parameters
----------
images : list
list of images
        image_height : int
            Height of the input frames, in pixels
        image_width : int
            Width of the input frames, in pixels
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
Returns
-------
list
            list of video frames and list of labels (placeholder poses, since no ground truth is available at inference)
"""
x_test = np.stack(images, axis=0).astype(np.float32)
if isGray == True:
x_test = np.reshape(x_test, (-1, image_height, image_width, 1))
else:
x_test = np.reshape(x_test, (-1, image_height, image_width, 3))
x_test = np.swapaxes(x_test, 1, 3)
x_test = np.swapaxes(x_test, 2, 3)
y_test = [0, 0, 0, 0] * len(x_test)
y_test = np.vstack(y_test[:]).astype(np.float32)
y_test = np.reshape(y_test, (-1, 4))
return [x_test, y_test]
@staticmethod
def CreateGreyPickle(trainPath, image_height, image_width, file_name):
"""Converts Dario's RGB dataset to a gray + vignette dataset
Parameters
----------
        trainPath : str
            The file location of the source RGB .pickle
        image_height : int
            Height of the input frames, in pixels
        image_width : int
            Width of the input frames, in pixels
file_name : str
name of the new .pickle
"""
train_set = pd.read_pickle(trainPath).values
logging.info('[DataProcessor] train shape: ' + str(train_set.shape))
# split between train and test sets:
x_train = train_set[:, 0]
x_train = np.vstack(x_train[:])
x_train = np.reshape(x_train, (-1, image_height, image_width, 3))
it = ImageTransformer()
x_train_grey = []
sigma = 50
mask = it.GetVignette(image_width, image_width, sigma)
for i in range(len(x_train)):
gray_image = cv2.cvtColor(x_train[i], cv2.COLOR_RGB2GRAY)
gray_image = gray_image * mask[24:84, 0:108]
gray_image = gray_image.astype(np.uint8)
x_train_grey.append(gray_image)
y_train = train_set[:, 1]
df = pd.DataFrame(data={'x': x_train_grey, 'y': y_train})
df.to_pickle(file_name)
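# Hedged usage sketch (file names are placeholders; 60x108 matches the frame
# size assumed elsewhere in this module):
#
#     x_tr, x_val, y_tr, y_val = DataProcessor.ProcessTrainData(
#         "train.pickle", image_height=60, image_width=108, isGray=True)
#     x_test, y_test = DataProcessor.ProcessTestData(
#         "test.pickle", image_height=60, image_width=108, isGray=True)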
|
py | 1a34e9c72ed531873f89da52ed4747ef22ec5b56 | # engine/reflection.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record
use the key 'name'. So for most return values, each record will have a
   'name' attribute.
"""
import contextlib
from .base import Connectable
from .base import Connection
from .base import Engine
from .. import exc
from .. import inspection
from .. import sql
from .. import util
from ..sql import operators
from ..sql import schema as sa_schema
from ..sql.type_api import TypeEngine
from ..util import topological
@util.decorator
def cache(fn, self, con, *args, **kw):
info_cache = kw.get("info_cache", None)
if info_cache is None:
return fn(self, con, *args, **kw)
key = (
fn.__name__,
tuple(a for a in args if isinstance(a, util.string_types)),
tuple((k, v) for k, v in kw.items() if k != "info_cache"),
)
ret = info_cache.get(key)
if ret is None:
ret = fn(self, con, *args, **kw)
info_cache[key] = ret
return ret
@inspection._self_inspects
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the reflection methods of the
:class:`~sqlalchemy.engine.interfaces.Dialect`, providing a
consistent interface as well as caching support for previously
fetched metadata.
A :class:`.Inspector` object is usually created via the
:func:`.inspect` function, which may be passed an :class:`.Engine`
or a :class:`.Connection`::
from sqlalchemy import inspect, create_engine
engine = create_engine('...')
insp = inspect(engine)
Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` associated
with the engine may opt to return an :class:`.Inspector` subclass that
provides additional methods specific to the dialect's target database.
"""
@util.deprecated(
"1.4",
"The __init__() method on :class:`.Inspector` is deprecated and "
"will be removed in a future release. Please use the "
":func:`.sqlalchemy.inspect` "
"function on an :class:`.Engine` or :class:`.Connection` in order to "
"acquire an :class:`.Inspector`.",
)
def __init__(self, bind):
"""Initialize a new :class:`.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
For a dialect-specific instance of :class:`.Inspector`, see
:meth:`.Inspector.from_engine`
"""
return self._init_legacy(bind)
@classmethod
def _construct(cls, init, bind):
if hasattr(bind.dialect, "inspector"):
cls = bind.dialect.inspector
self = cls.__new__(cls)
init(self, bind)
return self
def _init_legacy(self, bind):
if hasattr(bind, "exec_driver_sql"):
self._init_connection(bind)
else:
self._init_engine(bind)
def _init_engine(self, engine):
self.bind = self.engine = engine
engine.connect().close()
self._op_context_requires_connect = True
self.dialect = self.engine.dialect
self.info_cache = {}
def _init_connection(self, connection):
self.bind = connection
self.engine = connection.engine
self._op_context_requires_connect = False
self.dialect = self.engine.dialect
self.info_cache = {}
@classmethod
@util.deprecated(
"1.4",
"The from_engine() method on :class:`.Inspector` is deprecated and "
"will be removed in a future release. Please use the "
":func:`.sqlalchemy.inspect` "
"function on an :class:`.Engine` or :class:`.Connection` in order to "
"acquire an :class:`.Inspector`.",
)
def from_engine(cls, bind):
"""Construct a new dialect-specific Inspector object from the given
engine or connection.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
        This method differs from a direct constructor call of
:class:`.Inspector` in that the
:class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to
provide a dialect-specific :class:`.Inspector` instance, which may
provide additional methods.
See the example at :class:`.Inspector`.
"""
return cls._construct(cls._init_legacy, bind)
@inspection._inspects(Connectable)
def _connectable_insp(bind):
# this method should not be used unless some unusual case
# has subclassed "Connectable"
return Inspector._construct(Inspector._init_legacy, bind)
@inspection._inspects(Engine)
def _engine_insp(bind):
return Inspector._construct(Inspector._init_engine, bind)
@inspection._inspects(Connection)
def _connection_insp(bind):
return Inspector._construct(Inspector._init_connection, bind)
@contextlib.contextmanager
def _operation_context(self):
"""Return a context that optimizes for multiple operations on a single
transaction.
This essentially allows connect()/close() to be called if we detected
that we're against an :class:`.Engine` and not a :class:`.Connection`.
"""
if self._op_context_requires_connect:
conn = self.bind.connect()
else:
conn = self.bind
try:
yield conn
finally:
if self._op_context_requires_connect:
conn.close()
@contextlib.contextmanager
def _inspection_context(self):
"""Return an :class:`.Inspector` from this one that will run all
operations on a single connection.
"""
with self._operation_context() as conn:
sub_insp = self._construct(self.__class__._init_connection, conn)
sub_insp.info_cache = self.info_cache
yield sub_insp
@property
def default_schema_name(self):
"""Return the default schema name presented by the dialect
for the current engine's database user.
E.g. this is typically ``public`` for PostgreSQL and ``dbo``
for SQL Server.
"""
return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, "get_schema_names"):
with self._operation_context() as conn:
return self.dialect.get_schema_names(
conn, info_cache=self.info_cache
)
return []
def get_table_names(self, schema=None):
"""Return all table names in referred to within a particular schema.
The names are expected to be real tables only, not views.
Views are instead returned using the :meth:`.Inspector.get_view_names`
method.
:param schema: Schema name. If ``schema`` is left at ``None``, the
database's default schema is
used, else the named schema is searched. If the database does not
support named schemas, behavior is undefined if ``schema`` is not
passed as ``None``. For special quoting, use :class:`.quoted_name`.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies. Does not automatically
resolve cycles, and will raise :class:`.CircularDependencyError`
if cycles exist.
.. seealso::
:meth:`.Inspector.get_sorted_table_and_fkc_names`
:attr:`.MetaData.sorted_tables`
"""
with self._operation_context() as conn:
return self.dialect.get_table_names(
conn, schema, info_cache=self.info_cache
)
def has_table(self, table_name, schema=None):
"""Return True if the backend has a table of the given name.
.. versionadded:: 1.4
"""
# TODO: info_cache?
with self._operation_context() as conn:
return self.dialect.has_table(conn, table_name, schema)
def get_sorted_table_and_fkc_names(self, schema=None):
"""Return dependency-sorted table and foreign key constraint names in
referred to within a particular schema.
This will yield 2-tuples of
``(tablename, [(tname, fkname), (tname, fkname), ...])``
consisting of table names in CREATE order grouped with the foreign key
constraint names that are not detected as belonging to a cycle.
The final element
will be ``(None, [(tname, fkname), (tname, fkname), ..])``
which will consist of remaining
foreign key constraint names that would require a separate CREATE
step after-the-fact, based on dependencies between tables.
        .. versionadded:: 1.0.0
.. seealso::
:meth:`.Inspector.get_table_names`
:func:`.sort_tables_and_constraints` - similar method which works
with an already-given :class:`.MetaData`.
"""
with self._operation_context() as conn:
tnames = self.dialect.get_table_names(
conn, schema, info_cache=self.info_cache
)
tuples = set()
remaining_fkcs = set()
fknames_for_table = {}
for tname in tnames:
fkeys = self.get_foreign_keys(tname, schema)
fknames_for_table[tname] = set([fk["name"] for fk in fkeys])
for fkey in fkeys:
if tname != fkey["referred_table"]:
tuples.add((fkey["referred_table"], tname))
try:
candidate_sort = list(topological.sort(tuples, tnames))
except exc.CircularDependencyError as err:
for edge in err.edges:
tuples.remove(edge)
remaining_fkcs.update(
(edge[1], fkc) for fkc in fknames_for_table[edge[1]]
)
candidate_sort = list(topological.sort(tuples, tnames))
return [
(tname, fknames_for_table[tname].difference(remaining_fkcs))
for tname in candidate_sort
] + [(None, list(remaining_fkcs))]
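    # Illustrative sketch (not part of the original source) of how the structure
    # returned by get_sorted_table_and_fkc_names() is typically consumed:
    #
    #     for tname, fkcs in insp.get_sorted_table_and_fkc_names():
    #         if tname is not None:
    #             ...  # emit CREATE TABLE for tname
    #         else:
    #             ...  # emit ALTER TABLE ... ADD CONSTRAINT for each name in fkcs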
def get_temp_table_names(self):
"""return a list of temporary table names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
with self._operation_context() as conn:
return self.dialect.get_temp_table_names(
conn, info_cache=self.info_cache
)
def get_temp_view_names(self):
"""return a list of temporary view names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
with self._operation_context() as conn:
return self.dialect.get_temp_view_names(
conn, info_cache=self.info_cache
)
def get_table_options(self, table_name, schema=None, **kw):
"""Return a dictionary of options specified when the table of the
given name was created.
This currently includes some options that apply to MySQL tables.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
if hasattr(self.dialect, "get_table_options"):
with self._operation_context() as conn:
return self.dialect.get_table_options(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_view_names(
conn, schema, info_cache=self.info_cache
)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_view_definition(
conn, view_name, schema, info_cache=self.info_cache
)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
* ``name`` - the column's name
* ``type`` - the type of this column; an instance of
:class:`~sqlalchemy.types.TypeEngine`
* ``nullable`` - boolean flag if the column is NULL or NOT NULL
* ``default`` - the column's server default value - this is returned
as a string SQL expression.
* ``autoincrement`` - indicates that the column is auto incremented -
this is returned as a boolean or 'auto'
        * ``comment`` - (optional) the comment on the column. Only some
dialects return this key
* ``computed`` - (optional) when present it indicates that this column
is computed by the database. Only some dialects return this key.
Returned as a dict with the keys:
* ``sqltext`` - the expression used to generate this column returned
as a string SQL expression
* ``persisted`` - (optional) boolean that indicates if the column is
stored in the table
.. versionadded:: 1.3.16 - added support for computed reflection.
* ``dialect_options`` - (optional) a dict with dialect specific options
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
:return: list of dictionaries, each representing the definition of
a database column.
"""
with self._operation_context() as conn:
col_defs = self.dialect.get_columns(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def["type"]
if not isinstance(coltype, TypeEngine):
col_def["type"] = coltype()
return col_defs
def get_pk_constraint(self, table_name, schema=None, **kw):
"""Return information about primary key constraint on `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_pk_constraint(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
name
optional name of the foreign key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_foreign_keys(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
column_sorting
optional dict mapping column names to tuple of sort keywords,
which may include ``asc``, ``desc``, ``nullsfirst``, ``nullslast``.
.. versionadded:: 1.3.5
dialect_options
dict of dialect-specific index options. May not be present
for all dialects.
.. versionadded:: 1.0.0
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_indexes(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_unique_constraints(self, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_unique_constraints(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_table_comment(self, table_name, schema=None, **kw):
"""Return information about the table comment for ``table_name``.
Given a string ``table_name`` and an optional string ``schema``,
return table comment information as a dictionary with these keys:
text
text of the comment.
Raises ``NotImplementedError`` for a dialect that does not support
comments.
.. versionadded:: 1.2
"""
with self._operation_context() as conn:
return self.dialect.get_table_comment(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_check_constraints(self, table_name, schema=None, **kw):
"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
dialect_options
may or may not be present; a dictionary with additional
dialect-specific options for this CHECK constraint
.. versionadded:: 1.3.8
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
.. versionadded:: 1.1.0
"""
with self._operation_context() as conn:
return self.dialect.get_check_constraints(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
@util.deprecated_20(
":meth:`.Inspector.reflecttable`",
"The :meth:`.Inspector.reflecttable` method was renamed to "
":meth:`.Inspector.reflect_table`. This deprecated alias "
"will be removed in a future release.",
)
def reflecttable(self, *args, **kwargs):
"See reflect_table. This method name is deprecated"
return self.reflect_table(*args, **kwargs)
def reflect_table(
self,
table,
include_columns,
exclude_columns=(),
resolve_fks=True,
_extend_on=None,
):
"""Given a Table object, load its internal constructs based on
introspection.
This is the underlying method used by most dialects to produce
table reflection. Direct usage is like::
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.engine.reflection import Inspector
engine = create_engine('...')
meta = MetaData()
user_table = Table('user', meta)
insp = Inspector.from_engine(engine)
insp.reflect_table(user_table, None)
.. versionchanged:: 1.4 Renamed from ``reflecttable`` to
``reflect_table``
:param table: a :class:`~sqlalchemy.schema.Table` instance.
:param include_columns: a list of string column names to include
in the reflection process. If ``None``, all columns are reflected.
"""
if _extend_on is not None:
if table in _extend_on:
return
else:
_extend_on.add(table)
dialect = self.bind.dialect
with self._operation_context() as conn:
schema = conn.schema_for_object(table)
table_name = table.name
# get table-level arguments that are specifically
# intended for reflection, e.g. oracle_resolve_synonyms.
# these are unconditionally passed to related Table
# objects
reflection_options = dict(
(k, table.dialect_kwargs.get(k))
for k in dialect.reflection_options
if k in table.dialect_kwargs
)
# reflect table options, like mysql_engine
tbl_opts = self.get_table_options(
table_name, schema, **table.dialect_kwargs
)
if tbl_opts:
# add additional kwargs to the Table if the dialect
# returned them
table._validate_dialect_kwargs(tbl_opts)
if util.py2k:
if isinstance(schema, str):
schema = schema.decode(dialect.encoding)
if isinstance(table_name, str):
table_name = table_name.decode(dialect.encoding)
found_table = False
cols_by_orig_name = {}
for col_d in self.get_columns(
table_name, schema, **table.dialect_kwargs
):
found_table = True
self._reflect_column(
table,
col_d,
include_columns,
exclude_columns,
cols_by_orig_name,
)
if not found_table:
raise exc.NoSuchTableError(table.name)
self._reflect_pk(
table_name, schema, table, cols_by_orig_name, exclude_columns
)
self._reflect_fk(
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
)
self._reflect_indexes(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_unique_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_check_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_table_comment(
table_name, schema, table, reflection_options
)
def _reflect_column(
self, table, col_d, include_columns, exclude_columns, cols_by_orig_name
):
orig_name = col_d["name"]
table.dispatch.column_reflect(self, table, col_d)
# fetch name again as column_reflect is allowed to
# change it
name = col_d["name"]
if (include_columns and name not in include_columns) or (
exclude_columns and name in exclude_columns
):
return
coltype = col_d["type"]
col_kw = dict(
(k, col_d[k])
for k in [
"nullable",
"autoincrement",
"quote",
"info",
"key",
"comment",
]
if k in col_d
)
if "dialect_options" in col_d:
col_kw.update(col_d["dialect_options"])
colargs = []
if col_d.get("default") is not None:
default = col_d["default"]
if isinstance(default, sql.elements.TextClause):
default = sa_schema.DefaultClause(default, _reflected=True)
elif not isinstance(default, sa_schema.FetchedValue):
default = sa_schema.DefaultClause(
sql.text(col_d["default"]), _reflected=True
)
colargs.append(default)
if "computed" in col_d:
computed = sa_schema.Computed(**col_d["computed"])
colargs.append(computed)
if "sequence" in col_d:
self._reflect_col_sequence(col_d, colargs)
cols_by_orig_name[orig_name] = col = sa_schema.Column(
name, coltype, *colargs, **col_kw
)
if col.key in table.primary_key:
col.primary_key = True
table.append_column(col)
def _reflect_col_sequence(self, col_d, colargs):
if "sequence" in col_d:
# TODO: mssql and sybase are using this.
seq = col_d["sequence"]
sequence = sa_schema.Sequence(seq["name"], 1, 1)
if "start" in seq:
sequence.start = seq["start"]
if "increment" in seq:
sequence.increment = seq["increment"]
colargs.append(sequence)
def _reflect_pk(
self, table_name, schema, table, cols_by_orig_name, exclude_columns
):
pk_cons = self.get_pk_constraint(
table_name, schema, **table.dialect_kwargs
)
if pk_cons:
pk_cols = [
cols_by_orig_name[pk]
for pk in pk_cons["constrained_columns"]
if pk in cols_by_orig_name and pk not in exclude_columns
]
# update pk constraint name
table.primary_key.name = pk_cons.get("name")
# tell the PKConstraint to re-initialize
# its column collection
table.primary_key._reload(pk_cols)
def _reflect_fk(
self,
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
):
fkeys = self.get_foreign_keys(
table_name, schema, **table.dialect_kwargs
)
for fkey_d in fkeys:
conname = fkey_d["name"]
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_columns = [
cols_by_orig_name[c].key if c in cols_by_orig_name else c
for c in fkey_d["constrained_columns"]
]
if exclude_columns and set(constrained_columns).intersection(
exclude_columns
):
continue
referred_schema = fkey_d["referred_schema"]
referred_table = fkey_d["referred_table"]
referred_columns = fkey_d["referred_columns"]
refspec = []
if referred_schema is not None:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
schema=referred_schema,
autoload_with=self.bind,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(
".".join([referred_schema, referred_table, column])
)
else:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
autoload_with=self.bind,
schema=sa_schema.BLANK_SCHEMA,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join([referred_table, column]))
if "options" in fkey_d:
options = fkey_d["options"]
else:
options = {}
table.append_constraint(
sa_schema.ForeignKeyConstraint(
constrained_columns,
refspec,
conname,
link_to_name=True,
**options
)
)
_index_sort_exprs = [
("asc", operators.asc_op),
("desc", operators.desc_op),
("nullsfirst", operators.nullsfirst_op),
("nullslast", operators.nullslast_op),
]
def _reflect_indexes(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d["name"]
columns = index_d["column_names"]
column_sorting = index_d.get("column_sorting", {})
unique = index_d["unique"]
flavor = index_d.get("type", "index")
dialect_options = index_d.get("dialect_options", {})
duplicates = index_d.get("duplicates_constraint")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting %s key for (%s), key covers omitted columns."
% (flavor, ", ".join(columns))
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
idx_cols = []
for c in columns:
try:
idx_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"%s key '%s' was not located in "
"columns for table '%s'" % (flavor, c, table_name)
)
continue
c_sorting = column_sorting.get(c, ())
for k, op in self._index_sort_exprs:
if k in c_sorting:
idx_col = op(idx_col)
idx_cols.append(idx_col)
sa_schema.Index(
name,
*idx_cols,
_table=table,
**dict(list(dialect_options.items()) + [("unique", unique)])
)
def _reflect_unique_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Unique Constraints
try:
constraints = self.get_unique_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
conname = const_d["name"]
columns = const_d["column_names"]
duplicates = const_d.get("duplicates_index")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting unique constraint key for (%s), "
"key covers omitted columns." % ", ".join(columns)
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_cols = []
for c in columns:
try:
constrained_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"unique constraint key '%s' was not located in "
"columns for table '%s'" % (c, table_name)
)
else:
constrained_cols.append(constrained_col)
table.append_constraint(
sa_schema.UniqueConstraint(*constrained_cols, name=conname)
)
def _reflect_check_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
try:
constraints = self.get_check_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
table.append_constraint(sa_schema.CheckConstraint(**const_d))
def _reflect_table_comment(
self, table_name, schema, table, reflection_options
):
try:
comment_dict = self.get_table_comment(table_name, schema)
except NotImplementedError:
return
else:
table.comment = comment_dict.get("text", None)
|
py | 1a34e9eff97985bacaf13ff25d488831ddf87f94 | from django.apps import AppConfig
class MarvelWorldConfig(AppConfig):
name = 'marvel_world'
|
py | 1a34ea1971fa504f7e3c9f61c44bbd1c6722e243 | import raccoon
raccoon.go_up()
raccoon.go_right(4)
raccoon.go_up(2)
raccoon.go_left(1)
raccoon.go_right() |
py | 1a34ea5cb8da55a621459762b7d6fd06fc12d568 | # Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
__all__ = ['Averager']
import time
import itertools
import numpy as np
from .filter import Filter
from auspex.log import logger
from auspex.parameter import Parameter, FloatParameter
from auspex.stream import InputConnector, OutputConnector, DataStreamDescriptor, DataAxis
def view_fields(a, names):
"""
`a` must be a numpy structured array.
`names` is the collection of field names to keep.
Returns a view of the array `a` (not a copy).
http://stackoverflow.com/questions/37079175/how-to-remove-a-column-from-a-structured-numpy-array-without-copying-it
"""
dt = a.dtype
formats = [dt.fields[name][0] for name in names]
offsets = [dt.fields[name][1] for name in names]
itemsize = a.dtype.itemsize
newdt = np.dtype(dict(names=names,
formats=formats,
offsets=offsets,
itemsize=itemsize))
b = a.view(newdt)
return b
def remove_fields(a, names):
"""
`a` must be a numpy structured array.
`names` is the collection of field names to remove.
Returns a view of the array `a` (not a copy).
http://stackoverflow.com/questions/37079175/how-to-remove-a-column-from-a-structured-numpy-array-without-copying-it
"""
dt = a.dtype
keep_names = [name for name in dt.names if name not in names]
return view_fields(a, keep_names)
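# Hedged illustration of the two helpers above (not used by the filter itself):
#
#     a = np.zeros(3, dtype=[('x', 'f8'), ('y', 'f8'), ('meta', 'i4')])
#     view_fields(a, ['x', 'y'])    # structured view exposing only 'x' and 'y'
#     remove_fields(a, ['meta'])    # equivalent view, specified by dropped field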
class Averager(Filter):
"""Takes data and collapses along the specified axis."""
sink = InputConnector()
partial_average = OutputConnector()
source = OutputConnector()
final_variance = OutputConnector()
final_counts = OutputConnector()
axis = Parameter()
threshold = FloatParameter()
def __init__(self, axis=None, threshold=0.5, **kwargs):
super(Averager, self).__init__(**kwargs)
self.axis.value = axis
self.threshold.value = threshold
self.points_before_final_average = None
self.points_before_partial_average = None
self.sum_so_far = None
self.num_averages = None
self.passthrough = False
# Rate limiting for partial averages
self.last_update = time.time()
self.update_interval = 0.5
def update_descriptors(self):
logger.debug('Updating averager "%s" descriptors based on input descriptor: %s.', self.filter_name, self.sink.descriptor)
descriptor_in = self.sink.descriptor
names = [a.name for a in descriptor_in.axes]
self.axis.allowed_values = names
if self.axis.value is None:
self.axis.value = descriptor_in.axes[0].name
# Convert named axes to an index
if self.axis.value not in names:
raise ValueError("Could not find axis {} within the DataStreamDescriptor {}".format(self.axis.value, descriptor_in))
self.axis_num = descriptor_in.axis_num(self.axis.value)
logger.debug("Averaging over axis #%d: %s", self.axis_num, self.axis.value)
self.data_dims = descriptor_in.data_dims()
# If we only have a single point along this axis, then just pass the data straight through
if self.data_dims[self.axis_num] == 1:
logger.debug("Averaging over a singleton axis")
self.passthrough = True
if self.axis_num == len(descriptor_in.axes) - 1:
logger.debug("Performing scalar average!")
self.points_before_partial_average = 1
self.avg_dims = [1]
else:
self.points_before_partial_average = descriptor_in.num_points_through_axis(self.axis_num+1)
self.avg_dims = self.data_dims[self.axis_num+1:]
# If we get multiple final average simultaneously
self.reshape_dims = self.data_dims[self.axis_num:]
if self.axis_num > 0:
self.reshape_dims = [-1] + self.reshape_dims
self.mean_axis = self.axis_num - len(self.data_dims)
self.points_before_final_average = descriptor_in.num_points_through_axis(self.axis_num)
logger.debug("Points before partial average: %s.", self.points_before_partial_average)
logger.debug("Points before final average: %s.", self.points_before_final_average)
logger.debug("Data dimensions are %s", self.data_dims)
logger.debug("Averaging dimensions are %s", self.avg_dims)
# Define final axis descriptor
descriptor = descriptor_in.copy()
self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
logger.debug("Number of partial averages is %d", self.num_averages)
if len(descriptor.axes) == 0:
# We will be left with only a single point here!
descriptor.add_axis(DataAxis("result", [0]))
self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
self.current_avg_frame = np.zeros(self.points_before_final_average, dtype=descriptor.dtype)
self.partial_average.descriptor = descriptor
self.source.descriptor = descriptor
self.excited_counts = np.zeros(self.data_dims, dtype=np.int64)
# We can update the visited_tuples upfront if none
# of the sweeps are adaptive...
desc_out_dtype = descriptor_in.axis_data_type(with_metadata=True, excluding_axis=self.axis.value)
if not descriptor_in.is_adaptive():
vals = [a.points_with_metadata() for a in descriptor_in.axes if a.name != self.axis.value]
nested_list = list(itertools.product(*vals))
flattened_list = [tuple((val for sublist in line for val in sublist)) for line in nested_list]
descriptor.visited_tuples = np.core.records.fromrecords(flattened_list, dtype=desc_out_dtype)
else:
descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)
for stream in self.partial_average.output_streams:
stream.set_descriptor(descriptor)
stream.descriptor.buffer_mult_factor = 20
stream.end_connector.update_descriptors()
for stream in self.source.output_streams:
stream.set_descriptor(descriptor)
stream.end_connector.update_descriptors()
# Define variance axis descriptor
descriptor_var = descriptor_in.copy()
descriptor_var.data_name = "Variance"
descriptor_var.pop_axis(self.axis.value)
if descriptor_var.unit:
descriptor_var.unit = descriptor_var.unit + "^2"
descriptor_var.metadata["num_averages"] = self.num_averages
self.final_variance.descriptor= descriptor_var
# Define counts axis descriptor
descriptor_count = descriptor_in.copy()
descriptor_count.data_name = "Counts"
descriptor_count.dtype = np.float64
descriptor_count.pop_axis(self.axis.value)
descriptor_count.add_axis(DataAxis("state", [0,1]),position=0)
if descriptor_count.unit:
descriptor_count.unit = "counts"
descriptor_count.metadata["num_counts"] = self.num_averages
self.final_counts.descriptor = descriptor_count
if not descriptor_in.is_adaptive():
descriptor_var.visited_tuples = np.core.records.fromrecords(flattened_list, dtype=desc_out_dtype)
else:
descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)
for stream in self.final_variance.output_streams:
stream.set_descriptor(descriptor_var)
stream.end_connector.update_descriptors()
for stream in self.final_counts.output_streams:
stream.set_descriptor(descriptor_count)
stream.end_connector.update_descriptors()
def final_init(self):
if self.points_before_final_average is None:
raise Exception("Average has not been initialized. Run 'update_descriptors'")
self.completed_averages = 0
self.idx_frame = 0
self.idx_global = 0
# We only need to accumulate up to the averaging axis
# BUT we may get something longer at any given time!
self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)
def process_data(self, data):
if self.passthrough:
for os in self.source.output_streams:
os.push(data)
for os in self.final_variance.output_streams:
os.push(data*0.0)
for os in self.partial_average.output_streams:
os.push(data)
return
# TODO: handle unflattened data separately
if len(data.shape) > 1:
data = data.flatten()
#handle single points
elif not isinstance(data, np.ndarray) and (data.size == 1):
data = np.array([data])
if self.carry.size > 0:
data = np.concatenate((self.carry, data))
self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)
idx = 0
while idx < data.size:
#check whether we have enough data to fill an averaging frame
if data.size - idx >= self.points_before_final_average:
#logger.debug("Have {} points, enough for final avg.".format(data.size))
# How many chunks can we process at once?
num_chunks = int((data.size - idx)/self.points_before_final_average)
new_points = num_chunks*self.points_before_final_average
reshaped = data[idx:idx+new_points].reshape(self.reshape_dims)
averaged = reshaped.mean(axis=self.mean_axis)
idx += new_points
# do state assignment
excited_states = (np.real(reshaped) > self.threshold.value).sum(axis=self.mean_axis)
ground_states = self.num_averages - excited_states
if self.sink.descriptor.is_adaptive():
new_tuples = self.sink.descriptor.tuples()[self.idx_global:self.idx_global + new_points]
new_tuples_stripped = remove_fields(new_tuples, self.axis.value)
take_axis = -1 if self.axis_num > 0 else 0
reduced_tuples = new_tuples_stripped.reshape(self.reshape_dims).take((0,), axis=take_axis)
self.idx_global += new_points
# Add to Visited tuples
if self.sink.descriptor.is_adaptive():
for os in self.source.output_streams + self.final_variance.output_streams + self.partial_average.output_streams:
os.descriptor.visited_tuples = np.append(os.descriptor.visited_tuples, reduced_tuples)
for os in self.source.output_streams:
os.push(averaged)
for os in self.final_variance.output_streams:
os.push(reshaped.var(axis=self.mean_axis, ddof=1)) # N-1 in the denominator
for os in self.partial_average.output_streams:
os.push(averaged)
for os in self.final_counts.output_streams:
os.push(ground_states)
os.push(excited_states)
# Maybe we can fill a partial frame
elif data.size - idx >= self.points_before_partial_average:
# logger.info("Have {} points, enough for partial avg.".format(data.size))
# How many chunks can we process at once?
num_chunks = int((data.size - idx)/self.points_before_partial_average)
new_points = num_chunks*self.points_before_partial_average
# Find the appropriate dimensions for the partial
partial_reshape_dims = self.reshape_dims[:]
partial_reshape_dims[self.mean_axis] = -1
partial_reshape_dims = partial_reshape_dims[self.mean_axis:]
reshaped = data[idx:idx+new_points].reshape(partial_reshape_dims)
summed = reshaped.sum(axis=self.mean_axis)
self.sum_so_far += summed
self.current_avg_frame[self.idx_frame:self.idx_frame+new_points] = data[idx:idx+new_points]
idx += new_points
self.idx_frame += new_points
self.completed_averages += num_chunks
# If we now have enoough for the final average, push to both partial and final...
if self.completed_averages == self.num_averages:
reshaped = self.current_avg_frame.reshape(partial_reshape_dims)
for os in self.source.output_streams + self.partial_average.output_streams:
os.push(reshaped.mean(axis=self.mean_axis))
for os in self.final_variance.output_streams:
os.push(np.real(reshaped).var(axis=self.mean_axis, ddof=1)+1j*np.imag(reshaped).var(axis=self.mean_axis, ddof=1)) # N-1 in the denominator
# do state assignment
excited_states = (np.real(reshaped) < self.threshold.value).sum(axis=self.mean_axis)
ground_states = self.num_averages - excited_states
for os in self.final_counts.output_streams:
os.push(ground_states)
os.push(excited_states)
self.sum_so_far[:] = 0.0
self.current_avg_frame[:] = 0.0
self.completed_averages = 0
self.idx_frame = 0
else:
# Emit a partial average since we've accumulated enough data
if (time.time() - self.last_update >= self.update_interval):
for os in self.partial_average.output_streams:
os.push(self.sum_so_far/self.completed_averages)
self.last_update = time.time()
# otherwise just add it to the carry
else:
self.carry = data[idx:]
break
|
py | 1a34eaaf81d75625d096fb3a3383b1d27220394c | import os
import argparse
import pandas as pd
def combine_ftype(on_private):
# Content_2_index = {
# 0: "Empty",
# 1: "Pasta",
# 2: "Rice",
# 3: "Water"
# }
if on_private:
vggish_path = './filling_type/vggish/predictions/200903163404/ftype_private_test_agg_vggish.csv'
rf_path = './filling_type/CORSMAL-pyAudioAnalysis/ftype-randomforest-final_result_private_test.csv'
else:
vggish_path = './filling_type/vggish/predictions/200903163404/ftype_public_test_agg_vggish.csv'
rf_path = './filling_type/CORSMAL-pyAudioAnalysis/ftype-randomforest-final_result_public_test.csv'
vggish = pd.read_csv(vggish_path)
ftype_randomforest = pd.read_csv(rf_path)
ftype_randomforest = ftype_randomforest.sort_values(['Object', 'Sequence']).reset_index(drop=True)
random_forest_preds = ftype_randomforest[[
'Filling type prob0', 'Filling type prob1', 'Filling type prob2', 'Filling type prob3'
]]
vggish_preds = vggish[['ftype_prob_0', 'ftype_prob_1', 'ftype_prob_2', 'ftype_prob_3']]
ftype_combined = (random_forest_preds.values + vggish_preds.values) / 2
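    # Late fusion: average the per-class probability vectors of the two classifiers,
    # then take the argmax class per sample in the return statement below.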
# return pd.Series([Content_2_index[cls] for cls in ftype_combined.argmax(axis=1)])
return pd.Series([cls for cls in ftype_combined.argmax(axis=1)])
def combine_flvl(on_private):
# filling_2_value = {0: 0, 1: 50, 2: 90}
cols_with_probs_1 = ['flvl_prob_0', 'flvl_prob_1', 'flvl_prob_2']
if on_private:
vggish_path = './filling_level/vggish/predictions/200903162117/flvl_private_test_agg_vggish.csv'
r21d_path = './filling_level/r21d_rgb/predictions/200903214601/flvl_private_test_agg_r21d_rgb.csv'
rf_path = './filling_level/CORSMAL-pyAudioAnalysis/flevel-randomforest-final_result_private_test.csv'
else:
vggish_path = './filling_level/vggish/predictions/200903162117/flvl_public_test_agg_vggish.csv'
r21d_path = './filling_level/r21d_rgb/predictions/200903214601/flvl_public_test_agg_r21d_rgb.csv'
rf_path = './filling_level/CORSMAL-pyAudioAnalysis/flevel-randomforest-final_result_public_test.csv'
flvl_vggish = pd.read_csv(vggish_path)
flvl_r21d = pd.read_csv(r21d_path)
flvl_vggish = flvl_vggish[cols_with_probs_1]
flvl_r21d = flvl_r21d[cols_with_probs_1]
# flvl_combined = (flvl_vggish.values + flvl_r21d.values) / 2
# flvl_combined = flvl_vggish.values
    # we also observed that adding pyAudioAnalysis' random forest predictions improves validation performance
cols_with_probs_2 = ['Filling level [%] prob0', 'Filling level [%] prob1', 'Filling level [%] prob2']
flvl_rf = pd.read_csv(rf_path)
flvl_rf = flvl_rf.sort_values(['Object', 'Sequence']).reset_index(drop=True)
flvl_rf = flvl_rf[cols_with_probs_2]
flvl_combined = (flvl_vggish.values + flvl_r21d.values + flvl_rf.values) / 3
# return pd.Series([int(filling_2_value[cls]) for cls in flvl_combined.argmax(axis=1)])
return pd.Series([int(cls) for cls in flvl_combined.argmax(axis=1)])
def capacity(on_private):
if on_private:
cap_path = './capacity/results/estimation_combination_private_test.csv'
# cap_path = './capacity/results/estimation_combination_with_0_private_test.csv'
# cap_path = './capacity/results/estimation_combination_with_1_private_test.csv'
else:
cap_path = './capacity/results/estimation_combination_public_test.csv'
# cap_path = './capacity/results/estimation_combination_with_0_public_test.csv'
# cap_path = './capacity/results/estimation_combination_with_1_public_test.csv'
a = pd.read_csv(cap_path)
return a['capacity[mL]']
# def estimate_fmass(submission):
# Content_2_density = {
# "Empty": 0.0, # "Empty"
# 0: 0.0, # "Empty"
# "Pasta": 0.41, # "Pasta"
# 1: 0.41, # "Pasta"
# "Rice": 0.85, # "Rice"
# 2: 0.85, # "Rice"
# "Water": 1.00, # "Water"
# 3: 1.00, # "Water"
# }
# fmass_col = []
# for cont, seq, capacity, c_mass, ftype, flvl, fmass in submission.values:
# fmass = Content_2_density[ftype] * flvl / 100 * capacity
# fmass_col.append(fmass)
# return pd.Series(fmass_col)
def make_submission_form(data_path, on_private):
columns = ['Container ID', 'Sequence', 'Filling type', 'Filling level', 'Container Capacity']
submission = pd.DataFrame(columns=columns)
if on_private:
container_ids = ['13', '14', '15']
else:
container_ids = ['10', '11', '12']
# creating columns for container id and sequence using filenames from audio folder – 0053_audio.wav -> 53
object_list = []
sequence_list = []
for container_id in container_ids:
path = os.path.join(data_path, container_id, 'audio')
filenames = sorted(os.listdir(path))
seq_ids = [int(fname.replace('_audio.wav', '')) for fname in filenames]
sequence_list.extend(seq_ids)
object_list.extend([container_id] * len(seq_ids))
submission['Container ID'] = object_list
submission['Sequence'] = sequence_list
return submission
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--predict_on_private', dest='predict_on_private', action='store_true', default=False)
parser.add_argument('--data_path', default='./dataset/')
args = parser.parse_args()
# Gather prediction for the public test set
submission_public = make_submission_form(args.data_path, on_private=False)
submission_public['Filling type'] = combine_ftype(on_private=False)
submission_public['Filling level'] = combine_flvl(on_private=False)
submission_public['Container Capacity'] = capacity(on_private=False)
# submission_public['Filling mass'] = estimate_fmass(submission_public)
submission_public.to_csv('./submission_public_test.csv', index=False)
print('Formed predictions in ./submission_public_test.csv')
# If specified, gather prediction for the public test set
if args.predict_on_private:
submission_private = make_submission_form(args.data_path, on_private=True)
submission_private['Filling type'] = combine_ftype(on_private=True)
submission_private['Filling level'] = combine_flvl(on_private=True)
submission_private['Container Capacity'] = capacity(on_private=True)
# submission_private['Filling mass'] = estimate_fmass(submission_private)
submission_private.to_csv('./submission_private_test.csv', index=False)
print('Formed predictions in ./submission_private_test.csv')
|
py | 1a34eb2d493d5d6cfa0c60170088d80d9b88f7a9 | """custom_celeba dataset."""
from .custom_celeba import CustomCeleba
|
py | 1a34eb59c77f0eab3a70456f117958f215eb9a02 | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='row product-content-columns']/div[@class='col-sm-7 col-lg-5 product_page-right']/div[@class='general_info product-info']/h1[@class='product-title']",
'price' : "//div[@class='col-sm-7 col-lg-5 product_page-right']/div[@class='general_info product-info']/div[@class='price-section']/span[@class='price-new']",
'category' : "//div[@class='container']/ul[@class='breadcrumb']/li/a",
'description' : "//div[@class='container']/div[@class='row']/div[@id='content']/div[@id='tab-description']",
'images' : "//div[@class='row']/div[@id='content']/div[@class='row product-content-columns']/div[@class='col-sm-5 col-lg-7 product_page-left']//img/@src",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "//base/@href",
'brand' : ""
}
name = 'shopphuongnguyen.com'
allowed_domains = ['shopphuongnguyen.com']
start_urls = ['http://shopphuongnguyen.com']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-z0-9]+$']), 'parse_item'),
Rule(LinkExtractor(allow=['/\.*']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
py | 1a34ee09531ac731851e702cfa01dd04b4f27e7d | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class StratifiedSampleTest(test.TestCase):
def testGraphBuildAssertionFailures(self):
val = [array_ops.zeros([1, 3]), array_ops.ones([1, 5])]
label = constant_op.constant([1], shape=[1]) # must have batch dimension
probs = [.2] * 5
init_probs = [.1, .3, .1, .3, .2]
batch_size = 16
# Label must have only batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([]),
probs,
batch_size,
init_probs,
enqueue_many=True)
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([1, 1]),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Label must not be one-hot.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val,
constant_op.constant([0, 1, 0, 0, 0]),
probs, batch_size, init_probs)
# Data must be list, not singleton tensor.
with self.assertRaises(TypeError):
sampling_ops.stratified_sample(
array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)
# Data must have batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
constant_op.constant(1),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Batch dimensions on data and labels should be equal.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
[array_ops.zeros([2, 1])],
label,
probs,
batch_size,
init_probs,
enqueue_many=True)
# Probabilities must be numpy array, python list, or tensor.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)
# Probabilities shape must be fully defined.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
label,
array_ops.placeholder(
dtypes.float32, shape=[None]),
batch_size,
init_probs)
# In the rejection sampling case, make sure that probability lengths are
# the same.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
# In the rejection sampling case, make sure that zero initial probability
# classes also have zero target probability.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
def testRuntimeAssertionFailures(self):
valid_probs = [.2] * 5
valid_labels = [1, 2, 3]
vals = [array_ops.zeros([3, 1])]
illegal_labels = [
[0, -1, 1], # classes must be nonnegative
[5, 1, 1], # classes must be less than number of classes
[2, 3], # data and label batch size must be the same
]
illegal_probs = [
[.1] * 5, # probabilities must sum to one
[-.5, .5, .5, .4, .1], # probabilities must be non-negative
]
# Set up graph with illegal label vector.
label_ph = array_ops.placeholder(dtypes.int32, shape=[None])
probs_ph = array_ops.placeholder(
dtypes.float32, shape=[5]) # shape must be defined
val_tf, lbl_tf, prob_tf = sampling_ops._verify_input( # pylint: disable=protected-access
vals, label_ph, [probs_ph])
for illegal_label in illegal_labels:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([val_tf, lbl_tf],
feed_dict={label_ph: illegal_label,
probs_ph: valid_probs})
for illegal_prob in illegal_probs:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([prob_tf],
feed_dict={label_ph: valid_labels,
probs_ph: illegal_prob})
def testCanBeCalledMultipleTimes(self):
batch_size = 20
val_input_batch = [array_ops.zeros([2, 3, 4])]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
batches = sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
summary_op = logging_ops.merge_summary(
ops.get_collection(ops.GraphKeys.SUMMARIES))
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run(batches + (summary_op,))
coord.request_stop()
coord.join(threads)
def testRejectionBatchingBehavior(self):
batch_size = 20
input_batch_size = 11
val_input_batch = [array_ops.zeros([input_batch_size, 2, 3, 4])]
lbl_input_batch = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 1,
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 3)
probs = np.array([0, .2, 0, .8, 0])
data_batch, labels = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, .3, 0, .7, 0],
enqueue_many=True)
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run([data_batch, labels])
coord.request_stop()
coord.join(threads)
def testBatchDimensionNotRequired(self):
classes = 5
# Probs must be a tensor, since we pass it directly to _verify_input.
probs = constant_op.constant([1.0 / classes] * classes)
# Make sure that these vals/labels pairs don't throw any runtime exceptions.
legal_input_pairs = [
(np.zeros([2, 3]), [x % classes for x in range(2)]), # batch dim 2
(np.zeros([4, 15]), [x % classes for x in range(4)]), # batch dim 4
(np.zeros([10, 1]), [x % classes for x in range(10)]), # batch dim 10
]
# Set up graph with placeholders.
vals_ph = array_ops.placeholder(
dtypes.float32) # completely undefined shape
labels_ph = array_ops.placeholder(
dtypes.int32) # completely undefined shape
val_tf, labels_tf, _ = sampling_ops._verify_input( # pylint: disable=protected-access
[vals_ph], labels_ph, [probs])
# Run graph to make sure there are no shape-related runtime errors.
for vals, labels in legal_input_pairs:
with self.test_session() as sess:
sess.run([val_tf, labels_tf],
feed_dict={vals_ph: vals,
labels_ph: labels})
def testRejectionDataListInput(self):
batch_size = 20
val_input_batch = [
array_ops.zeros([2, 3, 4]), array_ops.ones([2, 4]), array_ops.ones(2) *
3
]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
val_list, lbls = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, 1, 0, 0, 0])
# Check output shapes.
self.assertTrue(isinstance(val_list, list))
self.assertEqual(len(val_list), len(val_input_batch))
self.assertTrue(isinstance(lbls, ops.Tensor))
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
out = sess.run(val_list + [lbls])
coord.request_stop()
coord.join(threads)
# Check output shapes.
self.assertEqual(len(out), len(val_input_batch) + 1)
def normalBehaviorHelper(self, sampler):
# Set up graph.
random_seed.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(lbl1), lambda: constant_op.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = np.array([.8, 0, 0, .2, 0])
batch_size = 16
data_batch, labels = sampler(val, label, probs, batch_size)
# Run session and keep track of how frequently the labels and values appear.
data_l = []
label_l = []
with self.test_session() as sess:
# Need to initialize variables that keep running total of classes seen.
variables.global_variables_initializer().run()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(20):
[data], lbls = sess.run([data_batch, labels])
data_l.append(data)
label_l.append(lbls)
coord.request_stop()
coord.join(threads)
# First check that the data matches the labels.
for lbl, data in zip(label_l, data_l):
for i in range(batch_size):
self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
# Check that the labels are approximately correct.
expected_label = probs[0] * lbl1 + probs[3] * lbl2
lbl_list = range(len(probs))
lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l)) # CLT
actual_lbl = np.mean(label_l)
# Tolerance is 3 standard deviations of the mean. According to the central
# limit theorem, this should cover 99.7% of cases. Note that since the seed
# is fixed, for a given implementation, this test will pass or fail 100% of
# the time. This use of assertNear is to cover cases where someone changes
# an implementation detail, which would cause the random behavior to differ.
self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testRejectionNormalBehavior(self):
initial_p = [.7, 0, 0, .3, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
class RejectionSampleTest(test.TestCase):
def testGraphConstructionFailures(self):
accept_prob_fn = lambda _: constant_op.constant(1.0)
batch_size = 32
# Data must have batch dimension if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)
# Batch dimensions should be equal if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([5, 1]), array_ops.zeros([4, 1])],
accept_prob_fn,
batch_size,
enqueue_many=True)
def testRuntimeFailures(self):
prob_ph = array_ops.placeholder(dtypes.float32, [])
accept_prob_fn = lambda _: prob_ph
batch_size = 32
# Set up graph.
random_seed.set_random_seed(1234)
sampling_ops.rejection_sample(
[array_ops.zeros([])],
accept_prob_fn,
batch_size,
runtime_checks=True,
name='rejection_sample')
prob_tensor = ops.get_default_graph().get_tensor_by_name(
'rejection_sample/prob_with_checks:0')
# Run session that should fail.
with self.test_session() as sess:
for illegal_prob in [-0.1, 1.1]:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})
def testNormalBehavior(self):
tensor_list = [
control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(2.0))
]
accept_prob_fn = lambda x: x[0] - 1.0
batch_size = 10
# Set up graph.
sample = sampling_ops.rejection_sample(tensor_list, accept_prob_fn,
batch_size)
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(5):
sample_np = sess.run(sample)[0]
self.assertListEqual([2.0] * batch_size, list(sample_np))
coord.request_stop()
coord.join(threads)
class ConditionalBatchTest(test.TestCase):
def testConditionallyEnqueueAndBatch(self):
random_seed.set_random_seed(1234)
tensor = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(1.0), lambda: constant_op.constant(2.0))
keep_input = math_ops.equal(tensor, 2.0)
batch_size = 4
# Set up the test graph.
[batch] = sampling_ops._conditional_batch([tensor], keep_input, batch_size) # pylint: disable=protected-access
# Check conditional operation.
with self.test_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batch_np = batch.eval()
coord.request_stop()
coord.join(threads)
# Check that all elements in batch come from tensors with acceptance prob
# 1, so that none come from acceptance prob 0.
self.assertListEqual(list(batch_np), [2.0] * batch_size)
def testConditionallyEnqueueAndBatchTypes(self):
tensor = constant_op.constant(1.0)
keep_input = constant_op.constant(True)
batch_size = 4
# Check that output types are the same for 1 and 2-length input lists.
output1 = sampling_ops._conditional_batch([tensor], keep_input, batch_size) # pylint: disable=protected-access
output2 = sampling_ops._conditional_batch( # pylint: disable=protected-access
[tensor, tensor], keep_input, batch_size)
self.assertEqual(type(output1), type(output2))
if __name__ == '__main__':
test.main()
|
py | 1a34f03bbba8df71b7cea655adbb586f11244888 | import falcon
import simplejson as json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
from core import utilities
from decimal import Decimal
import excelexporters.spacestatistics
class Reporting:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: query the space
# Step 3: query energy categories
# Step 4: query associated sensors
# Step 5: query associated points
# Step 6: query base period energy input
# Step 7: query reporting period energy input
# Step 8: query tariff data
# Step 9: query associated sensors and points data
# Step 10: construct the report
####################################################################################################################
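    # Illustrative request only (the route path and example values are assumptions, not defined
    # in this file; the query parameter names match the req.params reads below):
    #   GET /reports/spacestatistics?spaceid=1&periodtype=daily
    #       &baseperiodstartdatetime=2020-08-01T00:00:00&baseperiodenddatetime=2020-09-01T00:00:00
    #       &reportingperiodstartdatetime=2020-09-01T00:00:00&reportingperiodenddatetime=2020-10-01T00:00:00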
@staticmethod
def on_get(req, resp):
print(req.params)
space_id = req.params.get('spaceid')
period_type = req.params.get('periodtype')
base_start_datetime_local = req.params.get('baseperiodstartdatetime')
base_end_datetime_local = req.params.get('baseperiodenddatetime')
reporting_start_datetime_local = req.params.get('reportingperiodstartdatetime')
reporting_end_datetime_local = req.params.get('reportingperiodenddatetime')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if space_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
else:
space_id = str.strip(space_id)
if not space_id.isdigit() or int(space_id) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
if period_type is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
else:
period_type = str.strip(period_type)
if period_type not in ['hourly', 'daily', 'monthly', 'yearly']:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
base_start_datetime_utc = None
if base_start_datetime_local is not None and len(str.strip(base_start_datetime_local)) > 0:
base_start_datetime_local = str.strip(base_start_datetime_local)
try:
base_start_datetime_utc = datetime.strptime(base_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_START_DATETIME")
base_end_datetime_utc = None
if base_end_datetime_local is not None and len(str.strip(base_end_datetime_local)) > 0:
base_end_datetime_local = str.strip(base_end_datetime_local)
try:
base_end_datetime_utc = datetime.strptime(base_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_END_DATETIME")
if base_start_datetime_utc is not None and base_end_datetime_utc is not None and \
base_start_datetime_utc >= base_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BASE_PERIOD_END_DATETIME')
if reporting_start_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
else:
reporting_start_datetime_local = str.strip(reporting_start_datetime_local)
try:
reporting_start_datetime_utc = datetime.strptime(reporting_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
if reporting_end_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
else:
reporting_end_datetime_local = str.strip(reporting_end_datetime_local)
try:
reporting_end_datetime_utc = datetime.strptime(reporting_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
if reporting_start_datetime_utc >= reporting_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_REPORTING_PERIOD_END_DATETIME')
################################################################################################################
# Step 2: query the space
################################################################################################################
cnx_system = mysql.connector.connect(**config.myems_system_db)
cursor_system = cnx_system.cursor()
cnx_energy = mysql.connector.connect(**config.myems_energy_db)
cursor_energy = cnx_energy.cursor()
cnx_historical = mysql.connector.connect(**config.myems_historical_db)
cursor_historical = cnx_historical.cursor()
cursor_system.execute(" SELECT id, name, area, cost_center_id "
" FROM tbl_spaces "
" WHERE id = %s ", (space_id,))
row_space = cursor_system.fetchone()
if row_space is None:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.SPACE_NOT_FOUND')
space = dict()
space['id'] = row_space[0]
space['name'] = row_space[1]
space['area'] = row_space[2]
space['cost_center_id'] = row_space[3]
################################################################################################################
# Step 3: query energy categories
################################################################################################################
energy_category_set = set()
# query energy categories in base period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], base_start_datetime_utc, base_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query energy categories in reporting period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], reporting_start_datetime_utc, reporting_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query all energy categories in base period and reporting period
cursor_system.execute(" SELECT id, name, unit_of_measure, kgce, kgco2e "
" FROM tbl_energy_categories "
" ORDER BY id ", )
rows_energy_categories = cursor_system.fetchall()
if rows_energy_categories is None or len(rows_energy_categories) == 0:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404,
title='API.NOT_FOUND',
description='API.ENERGY_CATEGORY_NOT_FOUND')
energy_category_dict = dict()
for row_energy_category in rows_energy_categories:
if row_energy_category[0] in energy_category_set:
energy_category_dict[row_energy_category[0]] = {"name": row_energy_category[1],
"unit_of_measure": row_energy_category[2],
"kgce": row_energy_category[3],
"kgco2e": row_energy_category[4]}
################################################################################################################
# Step 4: query associated sensors
################################################################################################################
point_list = list()
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_sensors se, tbl_spaces_sensors spse, "
" tbl_points po, tbl_sensors_points sepo "
" WHERE sp.id = %s AND sp.id = spse.space_id AND spse.sensor_id = se.id "
" AND se.id = sepo.sensor_id AND sepo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 5: query associated points
################################################################################################################
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_spaces_points sppo, tbl_points po "
" WHERE sp.id = %s AND sp.id = sppo.space_id AND sppo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 6: query base period energy input
################################################################################################################
base = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
base[energy_category_id] = dict()
base[energy_category_id]['timestamps'] = list()
base[energy_category_id]['values'] = list()
base[energy_category_id]['subtotal'] = Decimal(0.0)
base[energy_category_id]['mean'] = None
base[energy_category_id]['median'] = None
base[energy_category_id]['minimum'] = None
base[energy_category_id]['maximum'] = None
base[energy_category_id]['stdev'] = None
base[energy_category_id]['variance'] = None
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically, \
base[energy_category_id]['mean'], \
base[energy_category_id]['median'], \
base[energy_category_id]['minimum'], \
base[energy_category_id]['maximum'], \
base[energy_category_id]['stdev'], \
base[energy_category_id]['variance'] = \
utilities.statistics_hourly_data_by_period(rows_space_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
base[energy_category_id]['timestamps'].append(current_datetime)
base[energy_category_id]['values'].append(actual_value)
base[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 7: query reporting period energy input
################################################################################################################
reporting = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
reporting[energy_category_id] = dict()
reporting[energy_category_id]['timestamps'] = list()
reporting[energy_category_id]['values'] = list()
reporting[energy_category_id]['subtotal'] = Decimal(0.0)
reporting[energy_category_id]['mean'] = None
reporting[energy_category_id]['median'] = None
reporting[energy_category_id]['minimum'] = None
reporting[energy_category_id]['maximum'] = None
reporting[energy_category_id]['stdev'] = None
reporting[energy_category_id]['variance'] = None
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically, \
reporting[energy_category_id]['mean'], \
reporting[energy_category_id]['median'], \
reporting[energy_category_id]['minimum'], \
reporting[energy_category_id]['maximum'], \
reporting[energy_category_id]['stdev'], \
reporting[energy_category_id]['variance'] = \
utilities.statistics_hourly_data_by_period(rows_space_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
reporting[energy_category_id]['timestamps'].append(current_datetime)
reporting[energy_category_id]['values'].append(actual_value)
reporting[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 8: query tariff data
################################################################################################################
parameters_data = dict()
parameters_data['names'] = list()
parameters_data['timestamps'] = list()
parameters_data['values'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
energy_category_tariff_dict = utilities.get_energy_category_tariffs(space['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
tariff_timestamp_list = list()
tariff_value_list = list()
for k, v in energy_category_tariff_dict.items():
# convert k from utc to local
k = k + timedelta(minutes=timezone_offset)
                    tariff_timestamp_list.append(k.isoformat()[0:19])
tariff_value_list.append(v)
parameters_data['names'].append('TARIFF-' + energy_category_dict[energy_category_id]['name'])
parameters_data['timestamps'].append(tariff_timestamp_list)
parameters_data['values'].append(tariff_value_list)
################################################################################################################
# Step 9: query associated sensors and points data
################################################################################################################
for point in point_list:
point_values = []
point_timestamps = []
if point['object_type'] == 'ANALOG_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_analog_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'ENERGY_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_energy_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'DIGITAL_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_digital_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')')
parameters_data['timestamps'].append(point_timestamps)
parameters_data['values'].append(point_values)
################################################################################################################
# Step 10: construct the report
################################################################################################################
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
        if cnx_energy:
            cnx_energy.disconnect()
        if cursor_historical:
            cursor_historical.close()
        if cnx_historical:
            cnx_historical.disconnect()
result = dict()
result['space'] = dict()
result['space']['name'] = space['name']
result['space']['area'] = space['area']
result['base_period'] = dict()
result['base_period']['names'] = list()
result['base_period']['units'] = list()
result['base_period']['timestamps'] = list()
result['base_period']['values'] = list()
result['base_period']['subtotals'] = list()
result['base_period']['means'] = list()
result['base_period']['medians'] = list()
result['base_period']['minimums'] = list()
result['base_period']['maximums'] = list()
result['base_period']['stdevs'] = list()
result['base_period']['variances'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['base_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period']['timestamps'].append(base[energy_category_id]['timestamps'])
result['base_period']['values'].append(base[energy_category_id]['values'])
result['base_period']['subtotals'].append(base[energy_category_id]['subtotal'])
result['base_period']['means'].append(base[energy_category_id]['mean'])
result['base_period']['medians'].append(base[energy_category_id]['median'])
result['base_period']['minimums'].append(base[energy_category_id]['minimum'])
result['base_period']['maximums'].append(base[energy_category_id]['maximum'])
result['base_period']['stdevs'].append(base[energy_category_id]['stdev'])
result['base_period']['variances'].append(base[energy_category_id]['variance'])
result['reporting_period'] = dict()
result['reporting_period']['names'] = list()
result['reporting_period']['energy_category_ids'] = list()
result['reporting_period']['units'] = list()
result['reporting_period']['timestamps'] = list()
result['reporting_period']['values'] = list()
result['reporting_period']['subtotals'] = list()
result['reporting_period']['means'] = list()
result['reporting_period']['means_per_unit_area'] = list()
result['reporting_period']['means_increment_rate'] = list()
result['reporting_period']['medians'] = list()
result['reporting_period']['medians_per_unit_area'] = list()
result['reporting_period']['medians_increment_rate'] = list()
result['reporting_period']['minimums'] = list()
result['reporting_period']['minimums_per_unit_area'] = list()
result['reporting_period']['minimums_increment_rate'] = list()
result['reporting_period']['maximums'] = list()
result['reporting_period']['maximums_per_unit_area'] = list()
result['reporting_period']['maximums_increment_rate'] = list()
result['reporting_period']['stdevs'] = list()
result['reporting_period']['stdevs_per_unit_area'] = list()
result['reporting_period']['stdevs_increment_rate'] = list()
result['reporting_period']['variances'] = list()
result['reporting_period']['variances_per_unit_area'] = list()
result['reporting_period']['variances_increment_rate'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period']['energy_category_ids'].append(energy_category_id)
result['reporting_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['reporting_period']['timestamps'].append(reporting[energy_category_id]['timestamps'])
result['reporting_period']['values'].append(reporting[energy_category_id]['values'])
result['reporting_period']['subtotals'].append(reporting[energy_category_id]['subtotal'])
result['reporting_period']['means'].append(reporting[energy_category_id]['mean'])
result['reporting_period']['means_per_unit_area'].append(
reporting[energy_category_id]['mean'] / space['area']
if reporting[energy_category_id]['mean'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['means_increment_rate'].append(
(reporting[energy_category_id]['mean'] - base[energy_category_id]['mean']) /
base[energy_category_id]['mean'] if (base[energy_category_id]['mean'] is not None and
base[energy_category_id]['mean'] > Decimal(0.0))
else None)
result['reporting_period']['medians'].append(reporting[energy_category_id]['median'])
result['reporting_period']['medians_per_unit_area'].append(
reporting[energy_category_id]['median'] / space['area']
if reporting[energy_category_id]['median'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['medians_increment_rate'].append(
(reporting[energy_category_id]['median'] - base[energy_category_id]['median']) /
base[energy_category_id]['median'] if (base[energy_category_id]['median'] is not None and
base[energy_category_id]['median'] > Decimal(0.0))
else None)
result['reporting_period']['minimums'].append(reporting[energy_category_id]['minimum'])
result['reporting_period']['minimums_per_unit_area'].append(
reporting[energy_category_id]['minimum'] / space['area']
if reporting[energy_category_id]['minimum'] is not None and
space['area'] is not None and space['area'] > Decimal(0.0)
else None)
result['reporting_period']['minimums_increment_rate'].append(
(reporting[energy_category_id]['minimum'] - base[energy_category_id]['minimum']) /
base[energy_category_id]['minimum'] if (base[energy_category_id]['minimum'] is not None and
base[energy_category_id]['minimum'] > Decimal(0.0))
else None)
result['reporting_period']['maximums'].append(reporting[energy_category_id]['maximum'])
result['reporting_period']['maximums_per_unit_area'].append(
reporting[energy_category_id]['maximum'] / space['area']
if reporting[energy_category_id]['maximum'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['maximums_increment_rate'].append(
(reporting[energy_category_id]['maximum'] - base[energy_category_id]['maximum']) /
base[energy_category_id]['maximum']
if (base[energy_category_id]['maximum'] is not None and
base[energy_category_id]['maximum'] > Decimal(0.0))
else None)
result['reporting_period']['stdevs'].append(reporting[energy_category_id]['stdev'])
result['reporting_period']['stdevs_per_unit_area'].append(
reporting[energy_category_id]['stdev'] / space['area']
if reporting[energy_category_id]['stdev'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['stdevs_increment_rate'].append(
(reporting[energy_category_id]['stdev'] - base[energy_category_id]['stdev']) /
base[energy_category_id]['stdev'] if (base[energy_category_id]['stdev'] is not None and
base[energy_category_id]['stdev'] > Decimal(0.0))
else None)
result['reporting_period']['variances'].append(reporting[energy_category_id]['variance'])
result['reporting_period']['variances_per_unit_area'].append(
reporting[energy_category_id]['variance'] / space['area']
if reporting[energy_category_id]['variance'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['variances_increment_rate'].append(
(reporting[energy_category_id]['variance'] - base[energy_category_id]['variance']) /
base[energy_category_id]['variance'] if (base[energy_category_id]['variance'] is not None and
base[energy_category_id]['variance'] > Decimal(0.0))
else None)
result['parameters'] = {
"names": parameters_data['names'],
"timestamps": parameters_data['timestamps'],
"values": parameters_data['values']
}
# export result to Excel file and then encode the file to base64 string
result['excel_bytes_base64'] = excelexporters.spacestatistics.export(result,
space['name'],
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
resp.body = json.dumps(result)
|
py | 1a34f1b7222c4e91fca7a6aa2db60479ec00ee71 | import pytest
import numpy as np
from .base import TestRefuter
@pytest.mark.usefixtures("fixed_seed")
class TestDataSubsetRefuter(object):
@pytest.mark.parametrize(["error_tolerance","estimator_method"],
[(0.01, "iv.instrumental_variable")])
def test_refutation_data_subset_refuter_continuous(self, error_tolerance, estimator_method):
refuter_tester = TestRefuter(error_tolerance, estimator_method, "data_subset_refuter")
refuter_tester.continuous_treatment_testsuite() # Run both
@pytest.mark.parametrize(["error_tolerance", "estimator_method"],
[(0.01, "backdoor.propensity_score_matching")])
def test_refutation_data_subset_refuter_binary(self, error_tolerance, estimator_method):
refuter_tester = TestRefuter(error_tolerance, estimator_method, "data_subset_refuter")
refuter_tester.binary_treatment_testsuite(tests_to_run="atleast-one-common-cause")
|
py | 1a34f1d85e1f8eae21d85decdea7db9bcbebdfb7 | import logging
from boto3.resources.action import ServiceAction, WaiterAction
from boto3.resources.params import create_request_parameters
from botocore import xform_name
from aioboto3.resources.response import AIOResourceHandler, AIORawHandler
logger = logging.getLogger(__name__)
class AIOServiceAction(ServiceAction):
def __init__(self, action_model, factory=None, service_context=None):
self._action_model = action_model
# In the simplest case we just return the response, but if a
# resource is defined, then we must create these before returning.
resource_response_model = action_model.resource
if resource_response_model:
self._response_handler = AIOResourceHandler(
search_path=resource_response_model.path,
factory=factory, resource_model=resource_response_model,
service_context=service_context,
operation_name=action_model.request.operation
)
else:
self._response_handler = AIORawHandler(action_model.path)
async def __call__(self, parent, *args, **kwargs):
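        # Build request parameters from the parent resource, await the async client
        # operation, and hand the raw response to the (async) response handler.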
operation_name = xform_name(self._action_model.request.operation)
# First, build predefined params and then update with the
# user-supplied kwargs, which allows overriding the pre-built
# params if needed.
params = create_request_parameters(parent, self._action_model.request)
params.update(kwargs)
logger.debug('Calling %s:%s with %r', parent.meta.service_name,
operation_name, params)
response = await getattr(parent.meta.client, operation_name)(**params)
logger.debug('Response: %r', response)
return await self._response_handler(parent, params, response)
class AioBatchAction(ServiceAction):
async def __call__(self, parent, *args, **kwargs):
service_name = None
client = None
responses = []
operation_name = xform_name(self._action_model.request.operation)
# Unlike the simple action above, a batch action must operate
# on batches (or pages) of items. So we get each page, construct
# the necessary parameters and call the batch operation.
async for page in parent.pages():
params = {}
for index, resource in enumerate(page):
# There is no public interface to get a service name
# or low-level client from a collection, so we get
# these from the first resource in the collection.
if service_name is None:
service_name = resource.meta.service_name
if client is None:
client = resource.meta.client
create_request_parameters(
resource, self._action_model.request,
params=params, index=index)
if not params:
# There are no items, no need to make a call.
break
params.update(kwargs)
logger.debug('Calling %s:%s with %r',
service_name, operation_name, params)
response = await (getattr(client, operation_name)(**params))
logger.debug('Response: %r', response)
responses.append(
self._response_handler(parent, params, response))
return responses
class AIOWaiterAction(WaiterAction):
async def __call__(self, parent, *args, **kwargs):
"""
Perform the wait operation after building operation
parameters.
:type parent: :py:class:`~boto3.resources.base.ServiceResource`
:param parent: The resource instance to which this action is attached.
"""
client_waiter_name = xform_name(self._waiter_model.waiter_name)
# First, build predefined params and then update with the
# user-supplied kwargs, which allows overriding the pre-built
# params if needed.
params = create_request_parameters(parent, self._waiter_model)
params.update(kwargs)
logger.debug('Calling %s:%s with %r',
parent.meta.service_name,
self._waiter_resource_name, params)
client = parent.meta.client
waiter = client.get_waiter(client_waiter_name)
response = await waiter.wait(**params)
logger.debug('Response: %r', response)
|
py | 1a34f254d38cbea119c5269846eb8e585781ff97 | """The test provides the basic capabilities to run numerous property tests."""
from datetime import timedelta
from datetime import datetime
import functools
import traceback
import shutil
import random
import os
import numpy as np
from property_auxiliary import distribute_command_line_arguments
from property_auxiliary import process_command_line_arguments
from property_auxiliary import get_random_string
from property_auxiliary import run_property_test
from property_auxiliary import print_rslt_ext
from property_auxiliary import collect_tests
from property_auxiliary import finish
def choose_module(inp_dict):
"""Chooses a module with probability proportional to number of stored tests."""
prob_dist = np.array([])
for module in inp_dict.keys():
prob_dist = np.append(prob_dist, len(inp_dict[module]))
prob_dist = prob_dist / np.sum(prob_dist)
return np.random.choice(list(inp_dict.keys()), p=prob_dist)
def run(args):
"""This function runs the property test battery."""
args = distribute_command_line_arguments(args)
test_dict = collect_tests()
rslt = dict()
for module in test_dict.keys():
rslt[module] = dict()
for test in test_dict[module]:
rslt[module][test] = [0, 0]
if args["is_check"]:
np.random.seed(args["seed"])
module = choose_module(test_dict)
test = np.random.choice(test_dict[module])
run_property_test(module, test)
else:
err_msg = []
start, timeout = datetime.now(), timedelta(hours=args["hours"])
print_rslt = functools.partial(print_rslt_ext, start, timeout)
print_rslt(rslt, err_msg)
while True:
seed = random.randrange(1, 100000)
dirname = get_random_string()
np.random.seed(seed)
module = choose_module(test_dict)
test = np.random.choice(test_dict[module])
try:
run_property_test(module, test, dirname)
rslt[module][test][0] += 1
except Exception:
rslt[module][test][1] += 1
msg = traceback.format_exc()
err_msg += [(module, test, seed, msg)]
os.chdir("../")
shutil.rmtree(dirname)
print_rslt(rslt, err_msg)
if timeout < datetime.now() - start:
break
finish(rslt)
if __name__ == "__main__":
args = process_command_line_arguments("property")
run(args)
|
py | 1a34f2ee2572fd32d6dff565c51608a1daa98170 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('message', '0002_message_date'),
]
operations = [
migrations.RemoveField(
model_name='message',
name='date',
),
]
|
py | 1a34f3aff5f487a913fd4bded077377cd5fb3b66 | import pytest
import time
from fixture.application import Application
from selenium import webdriver
def test_first_open(app):
app.session.open_home_page()
time.sleep(2)
assert app.driver.find_element_by_id('header').is_displayed() == True
def test_open_admin(app):
app.session.open_home_page()
|
py | 1a34f42b05242e4613041747d17ffc799b1e7da8 | """Tests for chebyshev module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
run_module_suite
)
def trim(x):
return cheb.chebtrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestPrivate(object):
def test__cseries_to_zseries(self):
for i in range(5):
inp = np.array([2] + [1]*i, np.double)
tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
res = cheb._cseries_to_zseries(inp)
assert_equal(res, tgt)
def test__zseries_to_cseries(self):
for i in range(5):
inp = np.array([.5]*i + [2] + [.5]*i, np.double)
tgt = np.array([2] + [1]*i, np.double)
res = cheb._zseries_to_cseries(inp)
assert_equal(res, tgt)
class TestConstants(object):
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
class TestArithmetic(object):
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_chebval(self):
#check empty input
assert_equal(cheb.chebval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(cheb.chebval(x, [1]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims)
def test_chebval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = cheb.chebval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_chebval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = cheb.chebval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_chebgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = cheb.chebgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_chebgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(object):
def test_chebint(self):
# check exceptions
assert_raises(ValueError, cheb.chebint, [0], .5)
assert_raises(ValueError, cheb.chebint, [0], -1)
assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = cheb.chebint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i])
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(cheb.chebval(-1, chebint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1)
res = cheb.chebint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k])
res = cheb.chebint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_chebint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T
res = cheb.chebint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c) for c in c2d])
res = cheb.chebint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d])
res = cheb.chebint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(object):
def test_chebder(self):
# check exceptions
assert_raises(ValueError, cheb.chebder, [0], .5)
assert_raises(ValueError, cheb.chebder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = cheb.chebder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_chebder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T
res = cheb.chebder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebder(c) for c in c2d])
res = cheb.chebder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_chebvander(self):
# check for 1d x
x = np.arange(3)
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
def test_chebvander2d(self):
# also tests chebval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = cheb.chebvander2d(x1, x2, [1, 2])
tgt = cheb.chebval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = cheb.chebvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_chebvander3d(self):
# also tests chebval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3])
tgt = cheb.chebval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(object):
def test_chebfit(self):
def f(x):
return x*(x - 1)*(x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
assert_raises(TypeError, cheb.chebfit, [], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, cheb.chebfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = cheb.chebfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
#
coef4 = cheb.chebfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
#
coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = cheb.chebfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
# test fitting only even polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = cheb.chebfit(x, y, 4)
assert_almost_equal(cheb.chebval(x, coef1), y)
coef2 = cheb.chebfit(x, y, [0, 2, 4])
assert_almost_equal(cheb.chebval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestInterpolate(object):
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(-1, 1, 10)
for deg in range(0, 10):
for p in range(0, deg + 1):
c = cheb.chebinterpolate(powx, deg, (p,))
assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
assert_raises(ValueError, cheb.chebcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(cheb.chebcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
class TestGauss(object):
def test_100(self):
x, w = cheb.chebgauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = cheb.chebvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.pi
assert_almost_equal(w.sum(), tgt)
class TestMisc(object):
def test_chebfromroots(self):
res = cheb.chebfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = [0]*i + [1]
res = cheb.chebfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res), trim(tgt))
def test_chebroots(self):
assert_almost_equal(cheb.chebroots([1]), [])
assert_almost_equal(cheb.chebroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = cheb.chebroots(cheb.chebfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_chebtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, cheb.chebtrim, coef, -1)
# Test results
assert_equal(cheb.chebtrim(coef), coef[:-1])
assert_equal(cheb.chebtrim(coef, 1), coef[:-3])
assert_equal(cheb.chebtrim(coef, 2), [0])
def test_chebline(self):
assert_equal(cheb.chebline(3, 4), [3, 4])
def test_cheb2poly(self):
for i in range(10):
assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i])
def test_poly2cheb(self):
for i in range(10):
assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)[1:-1]
tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x))
res = cheb.chebweight(x)
assert_almost_equal(res, tgt)
def test_chebpts1(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts1, 1.5)
assert_raises(ValueError, cheb.chebpts1, 0)
#test points
tgt = [0]
assert_almost_equal(cheb.chebpts1(1), tgt)
tgt = [-0.70710678118654746, 0.70710678118654746]
assert_almost_equal(cheb.chebpts1(2), tgt)
tgt = [-0.86602540378443871, 0, 0.86602540378443871]
assert_almost_equal(cheb.chebpts1(3), tgt)
tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
assert_almost_equal(cheb.chebpts1(4), tgt)
def test_chebpts2(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts2, 1.5)
assert_raises(ValueError, cheb.chebpts2, 1)
#test points
tgt = [-1, 1]
assert_almost_equal(cheb.chebpts2(2), tgt)
tgt = [-1, 0, 1]
assert_almost_equal(cheb.chebpts2(3), tgt)
tgt = [-1, -0.5, .5, 1]
assert_almost_equal(cheb.chebpts2(4), tgt)
tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
assert_almost_equal(cheb.chebpts2(5), tgt)
if __name__ == "__main__":
run_module_suite()
|
py | 1a34f42da25e0b3a212d2dd19486903f1a648604 | # Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
class _CutPlane():
def __init__(self, flow_data, x1='x', x2='y', x3_value=None):
"""
Initialize CutPlane object. Used to extract a 2D plane from a
3D vectoral velocity field
Args:
flow_data (np.array): 3D vector field of velocity data
x1 (str, optional): first dimension. Defaults to 'x'.
x2 (str, optional): second dimension. Defaults to 'y'.
x3_value (str, optional): third dimension. Defaults to None.
"""
# Assign the axis names
self.x1_name = x1
self.x2_name = x2
        # TODO: x3 is assumed to be whichever of x, y, or z is not x1 or x2,
        # so we should verify that x1 and x2 are each one of x, y, or z
self.x3_name = [x3 for x3 in ['x', 'y', 'z'] if x3 not in [x1, x2]][0]
# Get x1, x2 and x3 arrays
x1_array = getattr(flow_data, self.x1_name)
x2_array = getattr(flow_data, self.x2_name)
x3_array = getattr(flow_data, self.x3_name)
search_values = np.array(sorted(np.unique(x3_array)))
nearest_idx = (np.abs(search_values - x3_value)).argmin()
nearest_value = search_values[nearest_idx]
print('Nearest value in %s to %.2f is %.2f' %
(self.x3_name, x3_value, nearest_value))
# Select down the data
x3_select_mask = x3_array == nearest_value
# Store the un-interpolated input arrays at this slice
self.x1_in = x1_array[x3_select_mask]
self.x2_in = x2_array[x3_select_mask]
self.u_in = flow_data.u[x3_select_mask]
self.v_in = flow_data.v[x3_select_mask]
self.w_in = flow_data.w[x3_select_mask]
# Initially, x1_lin, x2_lin are unique values of input
self.x1_lin = np.unique(self.x1_in)
self.x2_lin = np.unique(self.x2_in)
# Save the resolution as the number of unique points in x1 and x2
self.resolution = (len(np.unique(self.x1_lin)),
len(np.unique(self.x2_lin)))
# Make initial meshing
self._remesh()
def _remesh(self):
# Mesh and interpolate u, v and w
self.x1_mesh, self.x2_mesh = np.meshgrid(self.x1_lin, self.x2_lin)
self.u_mesh = griddata(
np.column_stack([self.x1_in, self.x2_in]),
self.u_in, (self.x1_mesh.flatten(), self.x2_mesh.flatten()),
method='cubic')
self.v_mesh = griddata(
np.column_stack([self.x1_in, self.x2_in]),
self.v_in, (self.x1_mesh.flatten(), self.x2_mesh.flatten()),
method='cubic')
self.w_mesh = griddata(
np.column_stack([self.x1_in, self.x2_in]),
self.w_in, (self.x1_mesh.flatten(), self.x2_mesh.flatten()),
method='cubic')
# Save flat vectors
self.x1_flat = self.x1_mesh.flatten()
self.x2_flat = self.x2_mesh.flatten()
# Save u-cubed
self.u_cubed = self.u_mesh**3
# Define horizontal subclass
class HorPlane(_CutPlane):
"""
Subclass of _CutPlane. Shortcut to extracting a horizontal plane.
"""
def __init__(self, flow_data, z_value):
"""
Initialize horizontal CutPlane
Args:
flow_data (np.array): 3D vector field of velocity data
z_value (float): vertical position through which to slice
"""
# Set up call super
super().__init__(flow_data, x1='x', x2='y', x3_value=z_value)
# Define cross plane subclass
class CrossPlane(_CutPlane):
"""
Subclass of _CutPlane. Shortcut to extracting a cross-stream plane.
"""
def __init__(self, flow_data, x_value):
"""
Initialize cross-stream CutPlane
Args:
flow_data (np.array): 3D vector field of velocity data
x_value (float): streamwise position through which to slice
"""
# Set up call super
super().__init__(flow_data, x1='y', x2='z', x3_value=x_value)
# Define cross plane subclass
class VertPlane(_CutPlane):
"""
Subclass of _CutPlane. Shortcut to extracting a streamwise-vertical plane.
"""
def __init__(self, flow_data, y_value):
"""
Initialize streamwise-vertical CutPlane
Args:
flow_data (np.array): 3D vector field of velocity data
y_value (float): spanwise position through which to slice
"""
# Set up call super
super().__init__(flow_data, x1='x', x2='z', x3_value=y_value)
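# Hedged usage sketch: `flow_data` stands for any object exposing flat x, y, z and
# u, v, w arrays (as _CutPlane expects); the z_value and resolution below are
# made-up numbers, not defaults from FLORIS itself.
def _example_hor_plane(flow_data):
    """Slice a horizontal plane near a hypothetical hub height and re-grid it."""
    hor_plane = HorPlane(flow_data, z_value=90.0)          # nearest slice to z = 90
    hor_plane = change_resolution(hor_plane, (200, 100))   # re-grid to 200 x 100 points
    return hor_plane.x1_flat, hor_plane.x2_flat, hor_plane.u_mesh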
## Modification functions
def set_origin(cut_plane, center_x1=0.0, center_x2=0.0):
"""
Establish the origin of a CutPlane object.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
        center_x1 (float, optional): x1-coordinate of origin.
            Defaults to 0.0.
        center_x2 (float, optional): x2-coordinate of origin.
Defaults to 0.0.
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Store the un-interpolated input arrays at this slice
cut_plane.x1_in = cut_plane.x1_in - center_x1
cut_plane.x2_in = cut_plane.x2_in - center_x2
cut_plane.x1_lin = cut_plane.x1_lin - center_x1
cut_plane.x2_lin = cut_plane.x2_lin - center_x2
# Remesh
cut_plane._remesh()
return cut_plane
def change_resolution(cut_plane, resolution=(100, 100)):
"""
Modify default resolution of a CutPlane object.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
resolution (tuple, optional): Desired resolution in x1 and x2.
Defaults to (100, 100).
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Grid the data
cut_plane.x1_lin = np.linspace(min(cut_plane.x1_in), max(cut_plane.x1_in),
resolution[0])
cut_plane.x2_lin = np.linspace(min(cut_plane.x2_in), max(cut_plane.x2_in),
resolution[1])
# Save the new resolution
cut_plane.resolution = resolution
# Redo the mesh
cut_plane._remesh()
# Return the cutplane
return cut_plane
def interpolate_onto_array(cut_plane, x1_array, x2_array):
"""
Interpolate a CutPlane object onto specified coordinate arrays.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
x1_array (np.array): specified x1-coordinate.
x2_array (np.array): specified x2-coordinate.
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Grid the data given array
cut_plane.x1_lin = x1_array
cut_plane.x2_lin = x2_array
# Save the new resolution
cut_plane.resolution = (len(np.unique(cut_plane.x1_lin)),
len(np.unique(cut_plane.x2_lin)))
# Redo the mesh
cut_plane._remesh()
# Return the cutplane
return cut_plane
def rescale_axis(cut_plane, x1_factor=1.0, x2_factor=1.0):
"""
Stretch or compress CutPlane coordinates.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
x1_factor (float): scaling factor for x1-coordinate.
x2_factor (float): scaling factor for x2-coordinate.
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Store the un-interpolated input arrays at this slice
cut_plane.x1_in = cut_plane.x1_in / x1_factor
cut_plane.x2_in = cut_plane.x2_in / x2_factor
cut_plane.x1_lin = cut_plane.x1_lin / x1_factor
cut_plane.x2_lin = cut_plane.x2_lin / x2_factor
# Remesh
cut_plane._remesh()
return cut_plane
def calculate_wind_speed(cross_plane, x1_loc, x2_loc, R):
"""
Calculate effective wind speed within specified range of a point.
Args:
cross_plane (:py:class:`floris.tools.cut_plane.CrossPlane`):
plane of data.
        x1_loc (float): x1-coordinate of point of interest.
        x2_loc (float): x2-coordinate of point of interest.
        R (float): radius from point of interest to consider.
Returns:
(float): effective wind speed
"""
# Make a distance column
distance = np.sqrt((cross_plane.x1_flat - x1_loc)**2 +
(cross_plane.x2_flat - x2_loc)**2)
# Return the mean wind speed
return np.cbrt(np.mean(cross_plane.u_cubed[distance < R]))
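# Worked example (made-up numbers): if the points within R carry u = 6, 8 and 10 m/s,
# the effective speed is cbrt((6**3 + 8**3 + 10**3) / 3) = cbrt(576) ~ 8.32 m/s,
# i.e. an energy-weighted average rather than the plain mean of 8.0 m/s.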
def calculate_power(cross_plane,
x1_loc,
x2_loc,
R,
ws_array,
cp_array,
air_density=1.225):
"""
Calculate maximum power available in a given cross plane.
Args:
cross_plane (:py:class:`floris.tools.cut_plane.CrossPlane`):
plane of data.
        x1_loc (float): x1-coordinate of point of interest.
        x2_loc (float): x2-coordinate of point of interest.
R (float): Radius of wind turbine rotor.
ws_array (np.array): reference wind speed for cp curve.
cp_array (np.array): cp curve at reference wind speeds.
air_density (float, optional): air density. Defaults to 1.225.
Returns:
float: Power!
"""
# Compute the ws
ws = calculate_wind_speed(cross_plane, x1_loc, x2_loc, R)
# Compute the cp
cp_value = np.interp(ws, ws_array, cp_array)
#Return the power
return 0.5 * air_density * (np.pi * R**2) * cp_value * ws**3
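# Hedged usage sketch: the rotor radius and the ws/cp curve below are invented
# placeholder values, not taken from any real turbine definition.
def _example_power_at_point(cross_plane):
    """Estimate available power for a hypothetical rotor centred at (0, 90)."""
    ws_array = np.array([3.0, 8.0, 12.0, 25.0])    # reference wind speeds [m/s]
    cp_array = np.array([0.30, 0.45, 0.40, 0.20])  # power coefficients at those speeds
    return calculate_power(cross_plane, x1_loc=0.0, x2_loc=90.0, R=63.0,
                           ws_array=ws_array, cp_array=cp_array)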
# def get_profile(self, R, x2_loc, resolution=100, x1_locs=None):
# if x1_locs is None:
# x1_locs = np.linspace(
# min(self.x1_flat), max(self.x1_flat), resolution)
# v_array = np.array([self.calculate_wind_speed(
# x1_loc, x2_loc, R) for x1_loc in x1_locs])
# return x1_locs, v_array)
# def get_power_profile(self, ws_array, cp_array, rotor_radius, air_density=1.225, resolution=100, x1_locs=None):
# # Get the wind speed profile
# x1_locs, v_array = self.get_profile(resolution=resolution, x1_locs=x1_locs)
# # Get Cp
# cp_array = np.interp(v_array,ws_array,cp_array)
# # Return power array
# return x1_locs, 0.5 * air_density * (np.pi * rotor_radius**2) * cp_array * v_array**3
|
py | 1a34f48d88e9a461d809784e403b7a5d265197de | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Copyright 2009, Benjamin Kampmann <[email protected]>
# Copyright 2014, Hartmut Goebel <[email protected]>
# Copyright 2018, Pol Canelles <[email protected]>
from twisted.internet import reactor
from coherence.base import Coherence
from coherence.upnp.core import DIDLLite
# browse callback
def process_media_server_browse(result, client):
print(f"browsing root of: {client.device.get_friendly_name()}")
print(f"result contains: {result['NumberReturned']}", end=' ')
print(f"out of {result['TotalMatches']} total matches.")
elt = DIDLLite.DIDLElement.fromString(result['Result'])
for item in elt.getItems():
if item.upnp_class.startswith("object.container"):
print(" container", item.title, f"({item.id})", end=' ')
print("with", item.childCount, "items.")
if item.upnp_class.startswith("object.item"):
print(" item", item.title, f"({item.id}).")
# called for each media server found
def media_server_found(device):
print(f"Media Server found: {device.get_friendly_name()}")
d = device.client.content_directory.browse(
0,
browse_flag='BrowseDirectChildren',
process_result=False,
backward_compatibility=False)
d.addCallback(process_media_server_browse, device.client)
# sadly they sometimes get removed as well :(
def media_server_removed(*args):
print(f'Media Server gone: {args}')
def start():
# Initialize coherence and make sure that
# at least we have one server to explore
coherence = Coherence(
{'logmode': 'warning',
'controlpoint': 'yes',
'plugin': [
{'backend': 'LolcatsStore',
'name': 'Cohen3 LolcatsStore',
'proxy': 'no',
},
]
}
)
coherence.bind(coherence_device_detection_completed=media_server_found)
coherence.bind(coherence_device_removed=media_server_removed)
if __name__ == "__main__":
reactor.callWhenRunning(start)
reactor.run()
|
py | 1a34f4f54e5162c4402b302c01d337335d222222 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import multiprocessing as mp
from utils import iou_with_anchors
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
# Get video information for the test set
def getDatasetDict(opt):
df = pd.read_csv(opt["video_info"])
json_data = load_json(opt["video_anno"])
database = json_data
video_dict = {}
for i in range(len(df)):
video_name = df.video.values[i]
video_info = database[video_name]
video_new_info = {}
video_new_info['duration_frame'] = video_info['duration_frame']
video_new_info['duration_second'] = video_info['duration_second']
video_new_info["feature_frame"] = video_info['feature_frame']
video_subset = df.subset.values[i]
video_new_info['annotations'] = video_info['annotations']
if video_subset == 'validation':
video_dict[video_name] = video_new_info
return video_dict # 4728
# soft_nms is applied to each video separately
def soft_nms(df, alpha, t1, t2):
'''
df: proposals generated by network;
alpha: alpha value of Gaussian decaying function;
t1, t2: threshold for soft nms.
'''
    df = df.sort_values(by="score", ascending=False)  # sort proposals by score in descending order
tstart = list(df.xmin.values[:])
tend = list(df.xmax.values[:])
tscore = list(df.score.values[:])
rstart = []
rend = []
rscore = []
    # keep the top ~100 proposals for each video
while len(tscore) > 1 and len(rscore) < 101:
max_index = tscore.index(max(tscore))
tmp_iou_list = iou_with_anchors(
np.array(tstart),
np.array(tend), tstart[max_index], tend[max_index])
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = tmp_iou_list[idx]
tmp_width = tend[max_index] - tstart[max_index]
if tmp_iou > t1 + (t2 - t1) * tmp_width:
tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) /
alpha)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
newDf = pd.DataFrame()
newDf['score'] = rscore
newDf['xmin'] = rstart
newDf['xmax'] = rend
return newDf
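# Minimal sketch of calling soft_nms on a toy proposal table; the alpha/threshold
# values echo the opt["soft_nms_*"] settings used below but are made up here.
def _example_soft_nms():
    toy = pd.DataFrame({"xmin": [0.10, 0.12, 0.50],
                        "xmax": [0.30, 0.33, 0.70],
                        "score": [0.90, 0.85, 0.60]})
    # the heavily overlapping rows 0 and 1 have their scores decayed
    return soft_nms(toy, alpha=0.4, t1=0.5, t2=0.9)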
def video_post_process(opt, video_list, video_dict):
for video_name in video_list:
df = pd.read_csv("./output/BMN_results/" + video_name + ".csv")
if len(df) > 1:
snms_alpha = opt["soft_nms_alpha"]
snms_t1 = opt["soft_nms_low_thres"]
snms_t2 = opt["soft_nms_high_thres"]
df = soft_nms(df, snms_alpha, snms_t1, snms_t2)
df = df.sort_values(by="score", ascending=False)
video_info = video_dict[video_name]
video_duration = float(video_info["duration_frame"] // 16 * 16) / video_info["duration_frame"] * video_info[
"duration_second"]
proposal_list = []
for j in range(min(100, len(df))):
tmp_proposal = {}
tmp_proposal["score"] = df.score.values[j]
tmp_proposal["segment"] = [max(0, df.xmin.values[j]) * video_duration,
min(1, df.xmax.values[j]) * video_duration]
proposal_list.append(tmp_proposal)
result_dict[video_name[2:]] = proposal_list
def BMN_post_processing(opt):
video_dict = getDatasetDict(opt)
video_list = list(video_dict.keys()) # [:100]
global result_dict
result_dict = mp.Manager().dict()
num_videos = len(video_list)
    num_videos_per_thread = num_videos // opt["post_process_thread"]  # 8 threads (591 videos each)
    # create the worker processes
processes = []
for tid in range(opt["post_process_thread"] - 1):
tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) * num_videos_per_thread]
p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
p.start()
processes.append(p)
    # handle the case where the count does not divide evenly, i.e. process the remaining videos
tmp_video_list = video_list[(opt["post_process_thread"] - 1) * num_videos_per_thread:]
p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
p.start()
processes.append(p)
for p in processes:
p.join()
result_dict = dict(result_dict)
output_dict = {"version": "VERSION 1.3", "results": result_dict, "external_data": {}}
outfile = open(opt["result_file"], "w")
json.dump(output_dict, outfile)
outfile.close()
# opt = opts.parse_opt()
# opt = vars(opt)
# BMN_post_processing(opt)
|
py | 1a34f7873ded41abc07e4b7d128a03046a4b9b0e | # Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SearchIO objects to model high scoring regions between query and hit."""
import warnings
from operator import ge, le
from Bio import BiopythonWarning
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio._utils import getattr_str, trim_str
from Bio.SearchIO._utils import singleitem, allitems, fullcascade, fragcascade
from ._base import _BaseHSP
class HSP(_BaseHSP):
"""Class representing high-scoring region(s) between query and hit.
HSP (high-scoring pair) objects are contained by Hit objects (see Hit).
In most cases, HSP objects store the bulk of the statistics and results
(e.g. e-value, bitscores, query sequence, etc.) produced by a search
program.
Depending on the search output file format, a given HSP will contain one
or more HSPFragment object(s). Examples of search programs that produce HSP
with one HSPFragments are BLAST, HMMER, and FASTA. Other programs such as
BLAT or Exonerate may produce HSPs containing more than one HSPFragment.
However, their native terminologies may differ: in BLAT these fragments
are called 'blocks' while in Exonerate they are called exons or NER.
Here are examples from each type of HSP. The first one comes from a BLAST
search::
>>> from Bio import SearchIO
>>> blast_qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> blast_hsp = blast_qresult[1][0] # the first HSP from the second hit
>>> blast_hsp
HSP(hit_id='gi|301171311|ref|NR_035856.1|', query_id='33211', 1 fragments)
>>> print(blast_hsp)
Query: 33211 mir_1
Hit: gi|301171311|ref|NR_035856.1| Pan troglodytes microRNA mir-520b ...
Query range: [1:61] (1)
Hit range: [0:60] (1)
Quick stats: evalue 1.7e-22; bitscore 109.49
Fragments: 1 (60 columns)
Query - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
For HSPs with a single HSPFragment, you can invoke ``print`` on it and see the
underlying sequence alignment, if it exists. This is not the case for HSPs
with more than one HSPFragment. Below is an example, using an HSP from a
BLAT search. Invoking ``print`` on these HSPs will instead show a table of the
HSPFragment objects it contains::
>>> blat_qresult = SearchIO.read('Blat/mirna.pslx', 'blat-psl', pslx=True)
>>> blat_hsp = blat_qresult[1][0] # the first HSP from the second hit
>>> blat_hsp
HSP(hit_id='chr11', query_id='blat_1', 2 fragments)
>>> print(blat_hsp)
Query: blat_1 <unknown description>
Hit: chr11 <unknown description>
Query range: [42:67] (-1)
Hit range: [59018929:59018955] (1)
Quick stats: evalue ?; bitscore ?
Fragments: --- -------------- ---------------------- ----------------------
# Span Query range Hit range
--- -------------- ---------------------- ----------------------
0 6 [61:67] [59018929:59018935]
1 16 [42:58] [59018939:59018955]
    Notice that in HSPs with more than one HSPFragment, the HSP's ``query_range``
    and ``hit_range`` properties encompass all fragments it contains.
    You can check whether an HSP has more than one HSPFragment or not using the
``is_fragmented`` property::
>>> blast_hsp.is_fragmented
False
>>> blat_hsp.is_fragmented
True
Since HSP objects are also containers similar to Python lists, you can
access a single fragment in an HSP using its integer index::
>>> blat_fragment = blat_hsp[0]
>>> print(blat_fragment)
Query: blat_1 <unknown description>
Hit: chr11 <unknown description>
Query range: [61:67] (-1)
Hit range: [59018929:59018935] (1)
Fragments: 1 (6 columns)
Query - tatagt
Hit - tatagt
This applies to HSPs objects with a single fragment as well::
>>> blast_fragment = blast_hsp[0]
>>> print(blast_fragment)
Query: 33211 mir_1
Hit: gi|301171311|ref|NR_035856.1| Pan troglodytes microRNA mir-520b ...
Query range: [1:61] (1)
Hit range: [0:60] (1)
Fragments: 1 (60 columns)
Query - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
Regardless of the search output file format, HSP objects provide the
properties listed below. These properties always return values in a list,
due to the HSP object itself being a list-like container. However, for
HSP objects with a single HSPFragment, shortcut properties that fetches
the item from the list are also provided.
+----------------------+---------------------+-----------------------------+
| Property | Shortcut | Value |
+======================+=====================+=============================+
| aln_all | aln | HSP alignments as |
| | | MultipleSeqAlignment object |
+----------------------+---------------------+-----------------------------+
| aln_annotation_all | aln_annotation | dictionary of annotation(s) |
| | | of all fragments' alignments|
+----------------------+---------------------+-----------------------------+
| fragments | fragment | HSPFragment objects |
+----------------------+---------------------+-----------------------------+
| hit_all | hit | hit sequence as SeqRecord |
| | | objects |
+----------------------+---------------------+-----------------------------+
| hit_features_all | hit_features | SeqFeatures of all hit |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| hit_start_all | hit_start* | start coordinates of the |
| | | hit fragments |
+----------------------+---------------------+-----------------------------+
| hit_end_all | hit_end* | end coordinates of the hit |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| hit_span_all | hit_span* | sizes of each hit fragments |
+----------------------+---------------------+-----------------------------+
| hit_strand_all | hit_strand | strand orientations of the |
| | | hit fragments |
+----------------------+---------------------+-----------------------------+
| hit_frame_all | hit_frame | reading frames of the hit |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| hit_range_all | hit_range | tuples of start and end |
| | | coordinates of each hit |
| | | fragment |
+----------------------+---------------------+-----------------------------+
| query_all | query | query sequence as SeqRecord |
| | | object |
+----------------------+---------------------+-----------------------------+
| query_features_all | query_features | SeqFeatures of all query |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_start_all | query_start* | start coordinates of the |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_end_all | query_end* | end coordinates of the |
| | | query fragments |
+----------------------+---------------------+-----------------------------+
| query_span_all | query_span* | sizes of each query |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_strand_all | query_strand | strand orientations of the |
| | | query fragments |
+----------------------+---------------------+-----------------------------+
| query_frame_all | query_frame | reading frames of the query |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_range_all | query_range | tuples of start and end |
| | | coordinates of each query |
| | | fragment |
+----------------------+---------------------+-----------------------------+
For all types of HSP objects, the property will return the values in a list.
    Shortcuts are only applicable for HSPs with one fragment. Except the ones
    noted, if they are used on an HSP with more than one fragment, an exception
will be raised.
For properties that may be used in HSPs with multiple or single fragments
(``*_start``, ``*_end``, and ``*_span`` properties), their interpretation depends
    on how many fragments the HSP has:
+------------+---------------------------------------------------+
| Property | Value |
+============+===================================================+
| hit_start | smallest coordinate value of all hit fragments |
+------------+---------------------------------------------------+
| hit_end | largest coordinate value of all hit fragments |
+------------+---------------------------------------------------+
| hit_span | difference between ``hit_start`` and ``hit_end`` |
+------------+---------------------------------------------------+
| query_start| smallest coordinate value of all query fragments |
+------------+---------------------------------------------------+
| query_end | largest coordinate value of all query fragments |
+------------+---------------------------------------------------+
| query_span | difference between ``query_start`` and |
| | ``query_end`` |
+------------+---------------------------------------------------+
In addition to the objects listed above, HSP objects also provide the
following properties and/or attributes:
+--------------------+------------------------------------------------------+
| Property | Value |
+====================+======================================================+
| aln_span | total number of residues in all HSPFragment objects |
+--------------------+------------------------------------------------------+
| alphabet | alphabet used in hit and query SeqRecord objects |
+--------------------+------------------------------------------------------+
| is_fragmented | boolean, whether there are multiple fragments or not |
+--------------------+------------------------------------------------------+
| hit_id | ID of the hit sequence |
+--------------------+------------------------------------------------------+
| hit_description | description of the hit sequence |
+--------------------+------------------------------------------------------+
| hit_inter_ranges | list of hit sequence coordinates of the regions |
| | between fragments |
+--------------------+------------------------------------------------------+
| hit_inter_spans | list of lengths of the regions between hit fragments |
+--------------------+------------------------------------------------------+
| output_index | 0-based index for storing the order by which the HSP |
| | appears in the output file (default: -1). |
+--------------------+------------------------------------------------------+
| query_id | ID of the query sequence |
+--------------------+------------------------------------------------------+
| query_description | description of the query sequence |
+--------------------+------------------------------------------------------+
| query_inter_ranges | list of query sequence coordinates of the regions |
| | between fragments |
+--------------------+------------------------------------------------------+
| query_inter_spans | list of lengths of the regions between query |
| | fragments |
+--------------------+------------------------------------------------------+
.. [1] may be used in HSPs with multiple fragments
"""
# attributes we don't want to transfer when creating a new Hit class
# from this one
_NON_STICKY_ATTRS = ("_items",)
def __init__(self, fragments=(), output_index=-1):
"""Initialize an HSP object.
:param fragments: fragments contained in the HSP object
:type fragments: iterable yielding HSPFragment
:param output_index: optional index / ordering of the HSP fragment in
the original input file.
:type output_index: integer
HSP objects must be initialized with a list containing at least one
HSPFragment object. If multiple HSPFragment objects are used for
initialization, they must all have the same ``query_id``,
``query_description``, ``hit_id``, ``hit_description``, and alphabet
properties.
"""
if not fragments:
raise ValueError("HSP objects must have at least one HSPFragment object.")
        # TODO - Move this into the for loop in case hsps is a single use
        # iterable?
# check that all fragments contain the same IDs, descriptions, alphabet
for attr in (
"query_id",
"query_description",
"hit_id",
"hit_description",
"alphabet",
):
if len({getattr(frag, attr) for frag in fragments}) != 1:
raise ValueError(
"HSP object can not contain fragments with more than one %s." % attr
)
self.output_index = output_index
self._items = []
for fragment in fragments:
self._validate_fragment(fragment)
self._items.append(fragment)
def __repr__(self):
"""Return string representation of HSP object."""
return "%s(hit_id=%r, query_id=%r, %r fragments)" % (
self.__class__.__name__,
self.hit_id,
self.query_id,
len(self),
)
def __iter__(self):
"""Iterate over HSP items."""
return iter(self._items)
def __contains__(self, fragment):
"""Return True if HSPFragment is on HSP items."""
return fragment in self._items
def __len__(self):
"""Return number of HSPs items."""
return len(self._items)
def __bool__(self):
"""Return True if it has HSPs."""
return bool(self._items)
def __str__(self):
"""Return a human readable summary of the HSP object."""
lines = []
# set hsp info line
statline = []
# evalue
evalue = getattr_str(self, "evalue", fmt="%.2g")
statline.append("evalue " + evalue)
# bitscore
bitscore = getattr_str(self, "bitscore", fmt="%.2f")
statline.append("bitscore " + bitscore)
lines.append("Quick stats: " + "; ".join(statline))
if len(self.fragments) == 1:
return "\n".join(
[self._str_hsp_header(), "\n".join(lines), self.fragments[0]._str_aln()]
)
else:
lines.append(
" Fragments: %s %s %s %s" % ("-" * 3, "-" * 14, "-" * 22, "-" * 22)
)
pattern = "%16s %14s %22s %22s"
lines.append(pattern % ("#", "Span", "Query range", "Hit range"))
lines.append(pattern % ("-" * 3, "-" * 14, "-" * 22, "-" * 22))
for idx, block in enumerate(self.fragments):
# set hsp line and table
# alignment span
aln_span = getattr_str(block, "aln_span")
# query region
query_start = getattr_str(block, "query_start")
query_end = getattr_str(block, "query_end")
query_range = "[%s:%s]" % (query_start, query_end)
# max column length is 20
query_range = trim_str(query_range, 22, "~]")
# hit region
hit_start = getattr_str(block, "hit_start")
hit_end = getattr_str(block, "hit_end")
hit_range = "[%s:%s]" % (hit_start, hit_end)
hit_range = trim_str(hit_range, 22, "~]")
# append the hsp row
lines.append(pattern % (str(idx), aln_span, query_range, hit_range))
return self._str_hsp_header() + "\n" + "\n".join(lines)
def __getitem__(self, idx):
"""Return object of index idx."""
# if key is slice, return a new HSP instance
if isinstance(idx, slice):
obj = self.__class__(self._items[idx])
self._transfer_attrs(obj)
return obj
return self._items[idx]
def __setitem__(self, idx, fragments):
"""Set an item of index idx with the given fragments."""
# handle case if hsps is a list of hsp
if isinstance(fragments, (list, tuple)):
for fragment in fragments:
self._validate_fragment(fragment)
else:
self._validate_fragment(fragments)
self._items[idx] = fragments
def __delitem__(self, idx):
"""Delete item of index idx."""
# note that this may result in an empty HSP object, which should be
# invalid
del self._items[idx]
def _validate_fragment(self, fragment):
if not isinstance(fragment, HSPFragment):
raise TypeError("HSP objects can only contain HSPFragment objects.")
# HACK: to make validation during __init__ work
if self._items:
if fragment.hit_id != self.hit_id:
raise ValueError(
"Expected HSPFragment with hit ID %r, found %r instead."
                    % (self.hit_id, fragment.hit_id)
)
if fragment.hit_description != self.hit_description:
raise ValueError(
"Expected HSPFragment with hit description %r, found %r instead."
                    % (self.hit_description, fragment.hit_description)
)
if fragment.query_id != self.query_id:
raise ValueError(
"Expected HSPFragment with query ID %r, found %r instead."
% (self.query_id, fragment.query_id)
)
if fragment.query_description != self.query_description:
raise ValueError(
"Expected HSP with query description %r, found %r instead."
% (self.query_description, fragment.query_description)
)
def _aln_span_get(self):
# length of all alignments
# alignment span can be its own attribute, or computed from
# query / hit length
return sum(frg.aln_span for frg in self.fragments)
aln_span = property(
fget=_aln_span_get, doc="Total number of columns in all HSPFragment objects."
)
# coordinate properties #
def _get_coords(self, seq_type, coord_type):
assert seq_type in ("hit", "query")
assert coord_type in ("start", "end")
coord_name = "%s_%s" % (seq_type, coord_type)
coords = [getattr(frag, coord_name) for frag in self.fragments]
if None in coords:
warnings.warn(
"'None' exist in %s coordinates; ignored" % (coord_name),
BiopythonWarning,
)
return coords
def _hit_start_get(self):
return min(self._get_coords("hit", "start"))
hit_start = property(
fget=_hit_start_get, doc="Smallest coordinate value of all hit fragments."
)
def _query_start_get(self):
return min(self._get_coords("query", "start"))
query_start = property(
fget=_query_start_get, doc="Smallest coordinate value of all query fragments."
)
def _hit_end_get(self):
return max(self._get_coords("hit", "end"))
hit_end = property(
fget=_hit_end_get, doc="Largest coordinate value of all hit fragments."
)
def _query_end_get(self):
return max(self._get_coords("query", "end"))
query_end = property(
fget=_query_end_get, doc="Largest coordinate value of all hit fragments."
)
# coordinate-dependent properties #
def _hit_span_get(self):
try:
return self.hit_end - self.hit_start
except TypeError: # triggered if any of the coordinates are None
return None
hit_span = property(
fget=_hit_span_get, doc="The number of hit residues covered by the HSP."
)
def _query_span_get(self):
try:
return self.query_end - self.query_start
except TypeError: # triggered if any of the coordinates are None
return None
query_span = property(
fget=_query_span_get, doc="The number of query residues covered by the HSP."
)
def _hit_range_get(self):
return (self.hit_start, self.hit_end)
hit_range = property(
fget=_hit_range_get, doc="Tuple of HSP hit start and end coordinates."
)
def _query_range_get(self):
return (self.query_start, self.query_end)
query_range = property(
fget=_query_range_get, doc="Tuple of HSP query start and end coordinates."
)
def _inter_ranges_get(self, seq_type):
# this property assumes that there are no mixed strands in a hit/query
assert seq_type in ("query", "hit")
strand = getattr(self, "%s_strand_all" % seq_type)[0]
coords = getattr(self, "%s_range_all" % seq_type)
# determine function used to set inter range
# start and end coordinates, given two pairs
# of fragment start and end coordinates
if strand == -1:
startfunc, endfunc = min, max
else:
startfunc, endfunc = max, min
inter_coords = []
for idx, coord in enumerate(coords[:-1]):
start = startfunc(coords[idx])
end = endfunc(coords[idx + 1])
inter_coords.append((min(start, end), max(start, end)))
return inter_coords
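    # Worked example (illustrative coordinates only): for a plus-strand HSP whose
    # two fragments span hit coordinates (100, 150) and (180, 220), the single
    # inter-fragment range returned is (150, 180), i.e. the gap between fragments.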
def _hit_inter_ranges_get(self):
return self._inter_ranges_get("hit")
hit_inter_ranges = property(
fget=_hit_inter_ranges_get,
doc="Hit sequence coordinates of the regions between fragments.",
)
def _query_inter_ranges_get(self):
return self._inter_ranges_get("query")
query_inter_ranges = property(
fget=_query_inter_ranges_get,
doc="Query sequence coordinates of the regions between fragments.",
)
def _inter_spans_get(self, seq_type):
assert seq_type in ("query", "hit")
attr_name = "%s_inter_ranges" % seq_type
return [coord[1] - coord[0] for coord in getattr(self, attr_name)]
def _hit_inter_spans_get(self):
return self._inter_spans_get("hit")
hit_inter_spans = property(
fget=_hit_inter_spans_get, doc="Lengths of regions between hit fragments."
)
def _query_inter_spans_get(self):
return self._inter_spans_get("query")
query_inter_spans = property(
fget=_query_inter_spans_get, doc="Lengths of regions between query fragments."
)
# shortcuts for fragments' properties #
# bool check if there's more than one fragments
is_fragmented = property(
lambda self: len(self) > 1,
doc="Whether the HSP has more than one HSPFragment objects.",
)
# first item properties with setters
hit_description = fullcascade(
"hit_description", doc="Description of the hit sequence."
)
query_description = fullcascade(
"query_description", doc="Description of the query sequence."
)
hit_id = fullcascade("hit_id", doc="ID of the hit sequence.")
query_id = fullcascade("query_id", doc="ID of the query sequence.")
alphabet = fullcascade(
"alphabet", doc="Alphabet used in hit and query SeqRecord objects."
)
# properties for single-fragment HSPs
fragment = singleitem(doc="HSPFragment object, first fragment.")
hit = singleitem("hit", doc="Hit sequence as a SeqRecord object, first fragment.")
query = singleitem(
"query", doc="Query sequence as a SeqRecord object, first fragment."
)
aln = singleitem(
"aln", doc="Alignment of the first fragment as a MultipleSeqAlignment object."
)
aln_annotation = singleitem(
"aln_annotation",
doc="Dictionary of annotation(s) of the first fragment's alignment.",
)
hit_features = singleitem(
"hit_features", doc="Hit sequence features, first fragment."
)
query_features = singleitem(
"query_features", doc="Query sequence features, first fragment."
)
hit_strand = singleitem("hit_strand", doc="Hit strand orientation, first fragment.")
query_strand = singleitem(
"query_strand", doc="Query strand orientation, first fragment."
)
hit_frame = singleitem(
"hit_frame", doc="Hit sequence reading frame, first fragment."
)
query_frame = singleitem(
"query_frame", doc="Query sequence reading frame, first fragment."
)
# properties for multi-fragment HSPs
fragments = allitems(doc="List of all HSPFragment objects.")
hit_all = allitems(
"hit", doc="List of all fragments' hit sequences as SeqRecord objects."
)
query_all = allitems(
"query", doc="List of all fragments' query sequences as SeqRecord objects."
)
aln_all = allitems(
"aln", doc="List of all fragments' alignments as MultipleSeqAlignment objects."
)
aln_annotation_all = allitems(
"aln_annotation",
doc="Dictionary of annotation(s) of all fragments' alignments.",
)
hit_features_all = allitems(
"hit_features", doc="List of all hit sequence features."
)
query_features_all = allitems(
"query_features", doc="List of all query sequence features."
)
hit_strand_all = allitems(
"hit_strand", doc="List of all fragments' hit sequence strands."
)
query_strand_all = allitems(
"query_strand", doc="List of all fragments' query sequence strands"
)
hit_frame_all = allitems(
"hit_frame", doc="List of all fragments' hit sequence reading frames."
)
query_frame_all = allitems(
"query_frame", doc="List of all fragments' query sequence reading frames."
)
hit_start_all = allitems(
"hit_start", doc="List of all fragments' hit start coordinates."
)
query_start_all = allitems(
"query_start", doc="List of all fragments' query start coordinates."
)
hit_end_all = allitems("hit_end", doc="List of all fragments' hit end coordinates.")
query_end_all = allitems(
"query_end", doc="List of all fragments' query end coordinates."
)
hit_span_all = allitems("hit_span", doc="List of all fragments' hit sequence size.")
query_span_all = allitems(
"query_span", doc="List of all fragments' query sequence size."
)
hit_range_all = allitems(
"hit_range", doc="List of all fragments' hit start and end coordinates."
)
query_range_all = allitems(
"query_range", doc="List of all fragments' query start and end coordinates."
)
class HSPFragment(_BaseHSP):
"""Class representing a contiguous alignment of hit-query sequence.
HSPFragment forms the core of any parsed search output file. Depending on
the search output file format, it may contain the actual query and/or hit
sequences that produces the search hits. These sequences are stored as
SeqRecord objects (see SeqRecord):
>>> from Bio import SearchIO
>>> qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> fragment = qresult[0][0][0] # first hit, first hsp, first fragment
>>> print(fragment)
Query: 33211 mir_1
Hit: gi|262205317|ref|NR_030195.1| Homo sapiens microRNA 520b (MIR520...
Query range: [0:61] (1)
Hit range: [0:61] (1)
Fragments: 1 (61 columns)
Query - CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
# the query sequence is a SeqRecord object
>>> fragment.query.__class__
<class 'Bio.SeqRecord.SeqRecord'>
>>> print(fragment.query)
ID: 33211
Name: aligned query sequence
Description: mir_1
Number of features: 0
Seq('CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTT...GGG', DNAAlphabet())
# the hit sequence is a SeqRecord object as well
>>> fragment.hit.__class__
<class 'Bio.SeqRecord.SeqRecord'>
>>> print(fragment.hit)
ID: gi|262205317|ref|NR_030195.1|
Name: aligned hit sequence
Description: Homo sapiens microRNA 520b (MIR520B), microRNA
Number of features: 0
Seq('CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTT...GGG', DNAAlphabet())
# when both query and hit are present, we get a MultipleSeqAlignment object
>>> fragment.aln.__class__
<class 'Bio.Align.MultipleSeqAlignment'>
>>> print(fragment.aln)
DNAAlphabet() alignment with 2 rows and 61 columns
CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAG...GGG 33211
CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAG...GGG gi|262205317|ref|NR_030195.1|
"""
def __init__(
self,
hit_id="<unknown id>",
query_id="<unknown id>",
hit=None,
query=None,
alphabet=single_letter_alphabet,
):
"""Initialize the class."""
self._alphabet = alphabet
self.aln_annotation = {}
self._hit_id = hit_id
self._query_id = query_id
for seq_type in ("query", "hit"):
# query or hit attributes default attributes
setattr(self, "_%s_description" % seq_type, "<unknown description>")
setattr(self, "_%s_features" % seq_type, [])
# query or hit attributes whose default attribute is None
for attr in ("strand", "frame", "start", "end"):
setattr(self, "%s_%s" % (seq_type, attr), None)
            # self.query or self.hit; pick the matching constructor argument
            # directly instead of using eval()
            seq = query if seq_type == "query" else hit
            setattr(self, seq_type, seq if seq else None)
def __repr__(self):
"""Return HSPFragment info; hit id, query id, number of columns."""
info = "hit_id=%r, query_id=%r" % (self.hit_id, self.query_id)
try:
info += ", %i columns" % len(self)
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, info)
def __len__(self):
"""Return alignment span."""
return self.aln_span
def __str__(self):
"""Return string of HSP header and alignments."""
return self._str_hsp_header() + "\n" + self._str_aln()
def __getitem__(self, idx):
"""Return object of index idx."""
if self.aln is not None:
obj = self.__class__(
hit_id=self.hit_id, query_id=self.query_id, alphabet=self.alphabet
)
# transfer query and hit attributes
# let SeqRecord handle feature slicing, then retrieve the sliced
# features into the sliced HSPFragment
if self.query is not None:
obj.query = self.query[idx]
obj.query_features = obj.query.features
if self.hit is not None:
obj.hit = self.hit[idx]
obj.hit_features = obj.hit.features
# description, strand, frame
for attr in ("description", "strand", "frame"):
for seq_type in ("hit", "query"):
attr_name = "%s_%s" % (seq_type, attr)
self_val = getattr(self, attr_name)
setattr(obj, attr_name, self_val)
# alignment annotation should be transferred, since we can compute
# the resulting annotation
obj.aln_annotation = {}
for key, value in self.aln_annotation.items():
assert len(value[idx]) == len(obj)
obj.aln_annotation[key] = value[idx]
return obj
else:
raise TypeError(
"Slicing for HSP objects without alignment is not supported."
)
def _str_aln(self):
lines = []
# alignment length
aln_span = getattr_str(self, "aln_span")
lines.append(" Fragments: 1 (%s columns)" % aln_span)
# sequences
if self.query is not None and self.hit is not None:
try:
qseq = str(self.query.seq)
except AttributeError: # query is None
qseq = "?"
try:
hseq = str(self.hit.seq)
except AttributeError: # hit is None
hseq = "?"
# similarity line
simil = ""
if "similarity" in self.aln_annotation and isinstance(
self.aln_annotation.get("similarity"), str
):
simil = self.aln_annotation["similarity"]
if self.aln_span <= 67:
lines.append("%10s - %s" % ("Query", qseq))
if simil:
lines.append(" %s" % simil)
lines.append("%10s - %s" % ("Hit", hseq))
else:
# adjust continuation character length, so we don't display
# the same residues twice
if self.aln_span - 66 > 3:
cont = "~" * 3
else:
cont = "~" * (self.aln_span - 66)
lines.append("%10s - %s%s%s" % ("Query", qseq[:59], cont, qseq[-5:]))
if simil:
lines.append(" %s%s%s" % (simil[:59], cont, simil[-5:]))
lines.append("%10s - %s%s%s" % ("Hit", hseq[:59], cont, hseq[-5:]))
return "\n".join(lines)
# sequence properties #
def _set_seq(self, seq, seq_type):
"""Check the given sequence for attribute setting (PRIVATE).
:param seq: sequence to check
:type seq: string or SeqRecord
:param seq_type: sequence type
:type seq_type: string, choice of 'hit' or 'query'
"""
assert seq_type in ("hit", "query")
if seq is None:
return seq # return immediately if seq is None
else:
if not isinstance(seq, (str, SeqRecord)):
raise TypeError(
"%s sequence must be a string or a SeqRecord object." % seq_type
)
# check length if the opposite sequence is not None
opp_type = "hit" if seq_type == "query" else "query"
opp_seq = getattr(self, "_%s" % opp_type, None)
if opp_seq is not None:
if len(seq) != len(opp_seq):
raise ValueError(
"Sequence lengths do not match. Expected: %r (%s); found: %r (%s)."
% (len(opp_seq), opp_type, len(seq), seq_type)
)
seq_id = getattr(self, "%s_id" % seq_type)
seq_desc = getattr(self, "%s_description" % seq_type)
seq_feats = getattr(self, "%s_features" % seq_type)
seq_name = "aligned %s sequence" % seq_type
if isinstance(seq, SeqRecord):
seq.id = seq_id
seq.description = seq_desc
seq.name = seq_name
seq.features = seq_feats
seq.seq.alphabet = self.alphabet
elif isinstance(seq, str):
seq = SeqRecord(
Seq(seq, self.alphabet),
id=seq_id,
name=seq_name,
description=seq_desc,
features=seq_feats,
)
return seq
def _hit_get(self):
return self._hit
def _hit_set(self, value):
self._hit = self._set_seq(value, "hit")
hit = property(
fget=_hit_get,
fset=_hit_set,
doc="Hit sequence as a SeqRecord object, defaults to None.",
)
def _query_get(self):
return self._query
def _query_set(self, value):
self._query = self._set_seq(value, "query")
query = property(
fget=_query_get,
fset=_query_set,
doc="Query sequence as a SeqRecord object, defaults to None.",
)
def _aln_get(self):
if self.query is None and self.hit is None:
return None
elif self.hit is None:
return MultipleSeqAlignment([self.query], self.alphabet)
elif self.query is None:
return MultipleSeqAlignment([self.hit], self.alphabet)
else:
return MultipleSeqAlignment([self.query, self.hit], self.alphabet)
aln = property(
fget=_aln_get,
doc="Query-hit alignment as a MultipleSeqAlignment object, defaults to None.",
)
def _alphabet_get(self):
return self._alphabet
def _alphabet_set(self, value):
self._alphabet = value
try:
self.query.seq.alphabet = value
except AttributeError:
pass
try:
self.hit.seq.alphabet = value
except AttributeError:
pass
alphabet = property(
fget=_alphabet_get,
fset=_alphabet_set,
doc="Alphabet object used in the fragment's "
"sequences and alignment, defaults to single_letter_alphabet.",
)
def _aln_span_get(self):
# length of alignment (gaps included)
# alignment span can be its own attribute, or computed from
# query / hit length
if not hasattr(self, "_aln_span"):
if self.query is not None:
self._aln_span = len(self.query)
elif self.hit is not None:
self._aln_span = len(self.hit)
return self._aln_span
def _aln_span_set(self, value):
self._aln_span = value
aln_span = property(
fget=_aln_span_get,
fset=_aln_span_set,
doc="The number of alignment columns covered by the fragment.",
)
# id, description, and features properties #
hit_description = fragcascade("description", "hit", doc="Hit sequence description.")
query_description = fragcascade(
"description", "query", doc="Query sequence description."
)
hit_id = fragcascade("id", "hit", doc="Hit sequence ID.")
query_id = fragcascade("id", "query", doc="Query sequence ID.")
hit_features = fragcascade("features", "hit", doc="Hit sequence features.")
query_features = fragcascade("features", "query", doc="Query sequence features.")
# strand properties #
def _prep_strand(self, strand):
# follow SeqFeature's convention
if strand not in (-1, 0, 1, None):
raise ValueError("Strand should be -1, 0, 1, or None; not %r" % strand)
return strand
def _get_strand(self, seq_type):
assert seq_type in ("hit", "query")
strand = getattr(self, "_%s_strand" % seq_type)
if strand is None:
# try to compute strand from frame
frame = getattr(self, "%s_frame" % seq_type)
if frame is not None:
try:
strand = frame // abs(frame)
except ZeroDivisionError:
strand = 0
setattr(self, "%s_strand" % seq_type, strand)
return strand
def _hit_strand_get(self):
return self._get_strand("hit")
def _hit_strand_set(self, value):
self._hit_strand = self._prep_strand(value)
hit_strand = property(
fget=_hit_strand_get,
fset=_hit_strand_set,
doc="Hit sequence strand, defaults to None.",
)
def _query_strand_get(self):
return self._get_strand("query")
def _query_strand_set(self, value):
self._query_strand = self._prep_strand(value)
query_strand = property(
fget=_query_strand_get,
fset=_query_strand_set,
doc="Query sequence strand, defaults to None.",
)
# frame properties #
def _prep_frame(self, frame):
if frame not in (-3, -2, -1, 0, 1, 2, 3, None):
raise ValueError(
"Strand should be an integer between -3 and 3, or None; not %r" % frame
)
return frame
def _hit_frame_get(self):
return self._hit_frame
def _hit_frame_set(self, value):
self._hit_frame = self._prep_frame(value)
hit_frame = property(
fget=_hit_frame_get,
fset=_hit_frame_set,
doc="Hit sequence reading frame, defaults to None.",
)
def _query_frame_get(self):
"""Get query sequence reading frame (PRIVATE)."""
return self._query_frame
def _query_frame_set(self, value):
"""Set query sequence reading frame (PRIVATE)."""
self._query_frame = self._prep_frame(value)
query_frame = property(
fget=_query_frame_get,
fset=_query_frame_set,
doc="Query sequence reading frame, defaults to None.",
)
# coordinate properties #
def _prep_coord(self, coord, opp_coord_name, op):
# coord must either be None or int
if coord is None:
return coord
assert isinstance(coord, int)
# try to get opposite coordinate, if it's not present, return
try:
opp_coord = getattr(self, opp_coord_name)
except AttributeError:
return coord
# if opposite coordinate is None, return
if opp_coord is None:
return coord
# otherwise compare it to coord ('>=' or '<=')
else:
assert op(coord, opp_coord)
return coord
def _hit_start_get(self):
"""Get the sequence hit start coordinate (PRIVATE)."""
return self._hit_start
def _hit_start_set(self, value):
"""Set the sequence hit start coordinate (PRIVATE)."""
self._hit_start = self._prep_coord(value, "hit_end", le)
hit_start = property(
fget=_hit_start_get,
fset=_hit_start_set,
doc="Hit sequence start coordinate, defaults to None.",
)
def _query_start_get(self):
"""Get the query sequence start coordinate (PRIVATE)."""
return self._query_start
def _query_start_set(self, value):
"""Set the query sequence start coordinate (PRIVATE)."""
self._query_start = self._prep_coord(value, "query_end", le)
query_start = property(
fget=_query_start_get,
fset=_query_start_set,
doc="Query sequence start coordinate, defaults to None.",
)
def _hit_end_get(self):
"""Get the hit sequence end coordinate (PRIVATE)."""
return self._hit_end
def _hit_end_set(self, value):
"""Set the hit sequence end coordinate (PRIVATE)."""
self._hit_end = self._prep_coord(value, "hit_start", ge)
hit_end = property(
fget=_hit_end_get,
fset=_hit_end_set,
doc="Hit sequence end coordinate, defaults to None.",
)
def _query_end_get(self):
"""Get the query sequence end coordinate (PRIVATE)."""
return self._query_end
def _query_end_set(self, value):
"""Set the query sequence end coordinate (PRIVATE)."""
self._query_end = self._prep_coord(value, "query_start", ge)
query_end = property(
fget=_query_end_get,
fset=_query_end_set,
doc="Query sequence end coordinate, defaults to None.",
)
# coordinate-dependent properties #
def _hit_span_get(self):
"""Return the number of residues covered by the hit sequence (PRIVATE)."""
try:
return self.hit_end - self.hit_start
except TypeError: # triggered if any of the coordinates are None
return None
hit_span = property(
fget=_hit_span_get, doc="The number of residues covered by the hit sequence."
)
def _query_span_get(self):
"""Return the number or residues covered by the query (PRIVATE)."""
try:
return self.query_end - self.query_start
except TypeError: # triggered if any of the coordinates are None
return None
query_span = property(
fget=_query_span_get,
doc="The number of residues covered by the query sequence.",
)
def _hit_range_get(self):
"""Return the start and end of a hit (PRIVATE)."""
return (self.hit_start, self.hit_end)
hit_range = property(
fget=_hit_range_get, doc="Tuple of hit start and end coordinates."
)
def _query_range_get(self):
"""Return the start and end of a query (PRIVATE)."""
return (self.query_start, self.query_end)
query_range = property(
fget=_query_range_get, doc="Tuple of query start and end coordinates."
)
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
py | 1a34f93d35a2aff98e7e2f58b57689cdc8579963 | from models.rules.AbstractRule import AbstractRule
from models.laundry_schedule.Garmet import Garmet
from models.laundry_schedule.LoadCollection import LoadCollection
from models.laundry_schedule.Load import Load
class IterativeRule(AbstractRule):
def process(self, garmets:list[Garmet]) -> LoadCollection:
loads = []
for garmet in garmets:
load = Load()
load.add(garmet=garmet)
loads.append(load)
return LoadCollection(loads=loads) |
py | 1a34fa3d3dc1d35684d7733521740ad0cc4d2af3 | # Copyright 2018 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy
import pandas
import mlflow
from mlflow.utils.file_utils import TempDir
from mlflow.projects import run
from mlflow import tracking
from mlflow.pyfunc import load_pyfunc
def test_linear():
old_uri = tracking.get_tracking_uri()
with TempDir(chdr=False, remove_on_exit=True) as tmp:
try:
diamonds = tmp.path("diamonds")
root_tracking_dir = tmp.path("root_tracking_dir")
os.mkdir(diamonds)
os.mkdir(root_tracking_dir)
tracking.set_tracking_uri(root_tracking_dir)
# Download the diamonds dataset via mlflow run
mlflow.set_experiment("test-experiment")
run(".", entry_point="main", version=None,
parameters={"dest-dir": diamonds},
mode="local", cluster_spec=None, git_username=None, git_password=None,
use_conda=True, storage_dir=None)
# Run the main linear app via mlflow
submitted_run = run(
"apps/linear-regression", entry_point="main", version=None,
parameters={"train": os.path.join(diamonds, "train_diamonds.parquet"),
"test": os.path.join(diamonds, "test_diamonds.parquet"),
"alpha": .001,
"l1-ratio": .5,
"label-col": "price"},
mode="local",
cluster_spec=None, git_username=None, git_password=None, use_conda=True,
storage_dir=None)
pyfunc = load_pyfunc(path="model", run_id=submitted_run.run_id)
df = pandas.read_parquet(os.path.join(diamonds, "test_diamonds.parquet"))
# Removing the price column from the DataFrame so we can use the features to predict
df = df.drop(columns="price")
# Predicting from the saved pyfunc
predict = pyfunc.predict(df)
# Make sure the data is of the right type
assert isinstance(predict[0], numpy.float64)
finally:
tracking.set_tracking_uri(old_uri)
|
py | 1a34fa48c0bb25b2f5e69d1bbe9d189b858bcb17 | """Editing JSON and JavaScript files in Sublime views"""
import json
from live.shared.js_cursor import StructuredCursor
def json_root_in(view):
return Entity(view, [])
class Entity:
def __init__(self, view, path):
self.view = view
self.path = path
def __getitem__(self, key):
return Entity(self.view, self.path + [key])
def append(self, item):
cur = StructuredCursor(0, self.view, inside_what='array')
for key in self.path:
cur.enter()
cur.goto_entry_keyed_by(json.dumps(key))
cur.prepare_for_insertion_at_end()
dump_py_as_json(cur, item)
def dump_py_as_json(cur, obj):
def insert(obj):
if isinstance(obj, dict):
with cur.laying_out('object') as separate:
for k, v in obj.items():
separate()
cur.insert(json.dumps(k))
cur.insert_keyval_sep()
insert(v)
elif isinstance(obj, list):
with cur.laying_out('array') as separate:
for v in obj:
separate()
insert(v)
elif isinstance(obj, (str, int, bool)):
cur.insert(json.dumps(obj))
else:
raise RuntimeError("Unsupported object for insertion in JSON: {}".format(obj))
insert(obj)
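# Hypothetical usage sketch -- the view object and the "modules"/"main" keys are
# assumptions for illustration, not part of this module:
#
#     root = json_root_in(view)
#     root["modules"]["main"].append({"name": "extra", "enabled": True})
#
# This builds the key path ["modules", "main"], walks a StructuredCursor down to
# that array in the buffer, and serializes the new dict at its end.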
|
py | 1a34fa569e0a6334363cbf75e248d2ec26a235e4 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dialogflow.v2beta1 KnowledgeBases API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from dialogflow_v2beta1.gapic import enums
from dialogflow_v2beta1.gapic import knowledge_bases_client_config
from dialogflow_v2beta1.gapic.transports import knowledge_bases_grpc_transport
from dialogflow_v2beta1.proto import agent_pb2
from dialogflow_v2beta1.proto import agent_pb2_grpc
from dialogflow_v2beta1.proto import context_pb2
from dialogflow_v2beta1.proto import context_pb2_grpc
from dialogflow_v2beta1.proto import document_pb2
from dialogflow_v2beta1.proto import document_pb2_grpc
from dialogflow_v2beta1.proto import entity_type_pb2
from dialogflow_v2beta1.proto import entity_type_pb2_grpc
from dialogflow_v2beta1.proto import gcs_pb2
from dialogflow_v2beta1.proto import intent_pb2
from dialogflow_v2beta1.proto import intent_pb2_grpc
from dialogflow_v2beta1.proto import knowledge_base_pb2
from dialogflow_v2beta1.proto import knowledge_base_pb2_grpc
from dialogflow_v2beta1.proto import validation_result_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import struct_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("dialogflow").version
class KnowledgeBasesClient(object):
"""
Manages knowledge bases.
Allows users to setup and maintain knowledge bases with their knowledge data.
"""
SERVICE_ADDRESS = "dialogflow.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.dialogflow.v2beta1.KnowledgeBases"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
dialogflow_v2beta1.KnowledgeBasesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def knowledge_base_path(cls, project, knowledge_base):
"""Return a fully-qualified knowledge_base string."""
return google.api_core.path_template.expand(
"projects/{project}/knowledgeBases/{knowledge_base}",
project=project,
knowledge_base=knowledge_base,
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.KnowledgeBasesGrpcTransport,
Callable[[~.Credentials, type], ~.KnowledgeBasesGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = knowledge_bases_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=knowledge_bases_grpc_transport.KnowledgeBasesGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = knowledge_bases_grpc_transport.KnowledgeBasesGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_knowledge_bases(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns the list of all knowledge bases of the specified agent.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_knowledge_bases(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_knowledge_bases(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The project to list of knowledge bases for. Format:
``projects/<Project ID>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_knowledge_bases" not in self._inner_api_calls:
self._inner_api_calls[
"list_knowledge_bases"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_knowledge_bases,
default_retry=self._method_configs["ListKnowledgeBases"].retry,
default_timeout=self._method_configs["ListKnowledgeBases"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.ListKnowledgeBasesRequest(
parent=parent, page_size=page_size
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_knowledge_bases"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="knowledge_bases",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_knowledge_base(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Retrieves the specified knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> name = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
>>>
>>> response = client.get_knowledge_base(name)
Args:
name (str): Required. The name of the knowledge base to retrieve. Format
``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"get_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_knowledge_base,
default_retry=self._method_configs["GetKnowledgeBase"].retry,
default_timeout=self._method_configs["GetKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.GetKnowledgeBaseRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_knowledge_base(
self,
parent,
knowledge_base,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `knowledge_base`:
>>> knowledge_base = {}
>>>
>>> response = client.create_knowledge_base(parent, knowledge_base)
Args:
parent (str): Required. The project to create a knowledge base for. Format:
``projects/<Project ID>``.
knowledge_base (Union[dict, ~google.cloud.dialogflow_v2beta1.types.KnowledgeBase]): Required. The knowledge base to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"create_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_knowledge_base,
default_retry=self._method_configs["CreateKnowledgeBase"].retry,
default_timeout=self._method_configs["CreateKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.CreateKnowledgeBaseRequest(
parent=parent, knowledge_base=knowledge_base
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_knowledge_base(
self,
name,
force=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes the specified knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> name = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
>>>
>>> client.delete_knowledge_base(name)
Args:
name (str): Required. The name of the knowledge base to delete. Format:
``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
force (bool): Optional. Force deletes the knowledge base. When set to true, any documents
in the knowledge base are also deleted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"delete_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_knowledge_base,
default_retry=self._method_configs["DeleteKnowledgeBase"].retry,
default_timeout=self._method_configs["DeleteKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.DeleteKnowledgeBaseRequest(name=name, force=force)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_knowledge_base(
self,
knowledge_base,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the specified knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> # TODO: Initialize `knowledge_base`:
>>> knowledge_base = {}
>>>
>>> response = client.update_knowledge_base(knowledge_base)
Args:
knowledge_base (Union[dict, ~google.cloud.dialogflow_v2beta1.types.KnowledgeBase]): Required. The knowledge base to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase`
update_mask (Union[dict, ~google.cloud.dialogflow_v2beta1.types.FieldMask]): Optional. Not specified means ``update all``. Currently, only
``display_name`` can be updated, an InvalidArgument will be returned for
attempting to update other fields.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"update_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_knowledge_base,
default_retry=self._method_configs["UpdateKnowledgeBase"].retry,
default_timeout=self._method_configs["UpdateKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.UpdateKnowledgeBaseRequest(
knowledge_base=knowledge_base, update_mask=update_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("knowledge_base.name", knowledge_base.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
py | 1a34fbff635f942fa07246ad86dd5d1b6742573e | from django.contrib import admin
import nested_admin
from django.utils.html import format_html
from django.utils.translation import gettext as _
from mptt.admin import DraggableMPTTAdmin
from .models import Tax, Category, Notification, Currency, Carrier
# Register your models here.
class CategoriesAdmin(DraggableMPTTAdmin):
fields = ('name','slug')
list_display = ('tree_actions', 'getname','slug')
list_display_links = ('getname',)
prepopulated_fields = {'slug': ('name',)}
def getname(self, instance):
return format_html(
'<div style="text-indent:{}px">{}</div>',
instance._mpttfield('level') * self.mptt_level_indent,
instance.name, # Or whatever you want to put here
)
getname.short_description = _('Name')
class CarriersAdmin(admin.ModelAdmin):
list_display = ['name', 'price','delivery_text']
# list_filter = ('name', 'price')
admin.site.register(Carrier, CarriersAdmin)
admin.site.register(Category, CategoriesAdmin)
admin.site.register([Tax, Notification, Currency]) |
py | 1a34fcdbb56405f1ceccfb0b05f33a5ef6b749a4 | # quick-write python script to calculate, plot and write out the n(z) for the BOSS and 2dFLenS samples
# we can compare the BOSS n(z) for the full NGP with the samples within the KiDS footprint
# CH: 12th Dec 2019
from astropy.io import fits
import numpy as np
from matplotlib import rcParams
import matplotlib.pyplot as plt
# Some font setting
rcParams['ps.useafm'] = True
rcParams['pdf.use14corefonts'] = True
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 19}
plt.rc('font', **font)
plt.figure(figsize=(8,7))
#This is where the catalogues live on cuillin
DD='/disk09/KIDS/K1000_TWO_PT_STATS/GGLCATS'
#To apply the 9-band photo-zs mask use
#bitmask=0x6FFC
#If, however, you just want to know where the gri information is,
#use this instead:
bitmask=0x681C
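#For example (illustrative values only): an object with KIDSMASK = 0x0010 shares
#a bit with 0x681C, so (KIDSMASK & bitmask) is non-zero and the np.logical_not
#filters below exclude it, while an object with KIDSMASK = 0x0001 has no overlap
#with the mask and is kept.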
#what resolution do you want the n(z) binned with?
dz=0.01
#Now lets see the n(z) when the KiDS mask has been applied
for ilens in range(2):
#input data
bossfile=DD+'/BOSS_data_z'+str(ilens+1)+'.fits'
twodffile=DD+'/2dFLenS_data_z'+str(ilens+1)+'.fits'
#output ascii n(z)
bossnzfile=DD+'/N_of_Z/BOSS_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
twodfnzfile=DD+'/N_of_Z/2dFLenS_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
twodf_w_nzfile=DD+'/N_of_Z/2dFLenS_weighted_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
allnzfile=DD+'/N_of_Z/BOSS_and_2dFLenS_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
#set up the z-range to bin over and the number of bins
if ilens==0:
zmin=0.2
zmax=0.5
else:
zmin=0.5
zmax=0.75
nbins=np.int((zmax-zmin)/dz)
#Read in the BOSS catalogue weights and the MASK
hdulist = fits.open(bossfile)
bosscat = hdulist[1].data
BKIDSMASK=bosscat.field('KIDSMASK')
bossz=bosscat.field('Z')
bossweight = bosscat.field('WEICOMP')
#filter based on the 9-band or gri mask
ibfilter=np.logical_not(np.array(BKIDSMASK.astype(int) & bitmask, dtype=bool))
#histogram the redshifts within the KiDS footprint
if ilens==0:
mylabel='BOSS in KiDS'
else:
mylabel=None
n, bins, patches = plt.hist(bossz[ibfilter], nbins, normed=True, weights=bossweight[ibfilter], color='red',histtype=u'step',label=mylabel,linewidth=3)
#write out the mean redshift
print ('BOSS %d %s'%(ilens,np.average(bossz[ibfilter],weights=bossweight[ibfilter])))
#and write out to file (note reporting the left corner of the bin here)
np.savetxt(bossnzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#Read in the 2dFLenS catalogue and the MASK
hdulist = fits.open(twodffile)
twodfcat = hdulist[1].data
TKIDSMASK=twodfcat.field('KIDSMASK')
twodfz=twodfcat.field('Z')
twodfweight = twodfcat.field('WEIMAG')
twodfweightcomp = twodfcat.field('WEICOMP')
#filter based on the 9-band or gri mask
itfilter=np.logical_not(np.array(TKIDSMASK.astype(int) & bitmask, dtype=bool))
#this with no weights
if ilens==0:
mylabel='2dFLenS in KiDS'
else:
mylabel=None
n, bins, patches = plt.hist(twodfz[itfilter], nbins, normed=True, color='green', histtype=u'step',label=mylabel,linewidth=2.5)
np.savetxt(twodfnzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#write out the mean redshift
print ('2dFLenS %d %s'%(ilens,np.average(twodfz[itfilter])))
#here we apply the gri weights
if ilens==0:
mylabel='2dFLenS weighted'
else:
mylabel=None
n, bins, patches = plt.hist(twodfz[itfilter], nbins, normed=True, weights=twodfweight[itfilter], color='blue', histtype=u'step',label=mylabel,linewidth=2.5)
np.savetxt(twodf_w_nzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#what does a combined unweighted 2dFLenS and BOSS in KiDS n(z) look like?
allinkids = np.append(twodfz[itfilter],bossz[ibfilter])
allweight = np.append(twodfweightcomp[itfilter],bossweight[ibfilter])
if ilens==0:
mylabel='BOSS and 2dFLenS'
else:
mylabel=None
n, bins, patches = plt.hist(allinkids, nbins, normed=True, weights=allweight, color='orange', histtype=u'step',label=mylabel,linewidth=2.5)
np.savetxt(allnzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#write out the mean redshift
print ('All %d %s'%(ilens,np.average(allinkids,weights=allweight)))
#Lets overplot the n(z) in the original NGP data files
original_datafile=DD+'/BOSS_original/galaxy_DR12v5_CMASSLOWZTOT_North.fits'
hdulist = fits.open(original_datafile)
datacat = hdulist[1].data
zspec=datacat.field('Z')
weicp = datacat.field('WEIGHT_CP')
weinoz = datacat.field('WEIGHT_NOZ')
weisys = datacat.field('WEIGHT_SYSTOT')
weicompboss = weisys*(weinoz+weicp-1.)
zbin1filt=((zspec <=0.5) & (zspec>0.2))
nbins1=np.int((0.5-0.2)/dz)
bossallnzfile1=DD+'/N_of_Z/BOSS_NGP_n_of_z1.txt'
zbin2filt=((zspec <=0.75) & (zspec>0.5))
nbins2=np.int((0.75-0.5)/dz)
bossallnzfile2=DD+'/N_of_Z/BOSS_NGP_n_of_z2.txt'
n, bins, patches = plt.hist(zspec[zbin1filt], nbins1, normed=True, weights=weicompboss[zbin1filt], color='black', alpha=0.75,label='BOSS all',histtype=u'step',linewidth=4)
np.savetxt(bossallnzfile1,np.c_[bins[0:nbins1],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
n, bins, patches = plt.hist(zspec[zbin2filt], nbins2, normed=True, weights=weicompboss[zbin2filt], color='black', alpha=0.75, histtype=u'step',linewidth=4)
np.savetxt(bossallnzfile2,np.c_[bins[0:nbins2],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
plt.xlim(0.15,0.8)
plt.xlabel('z')
plt.ylabel('n(z)')
plt.legend(loc = 'upper left', fontsize=14)
plt.savefig('BOSS-2dFLenS-nofz.png')
plt.show()
|
py | 1a34fd51a3afd272261e9ced41b40fec096cab42 | from os import getenv
from fastapi import Request
from fastapi.params import Depends
from fastapi.templating import Jinja2Templates
from pyngrok import conf, ngrok
from lnbits.core.models import User
from lnbits.decorators import check_user_exists
from . import ngrok_ext, ngrok_renderer
templates = Jinja2Templates(directory="templates")
def log_event_callback(log):
string = str(log)
string2 = string[string.find('url="https') : string.find('url="https') + 80]
if string2:
string3 = string2
string4 = string3[4:]
global string5
string5 = string4.replace('"', "")
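# Example of what the slicing above assumes (hypothetical log text, not taken
# from the ngrok documentation): a log entry whose string form contains
# url="https://abc123.ngrok.io" leaves string5 holding that https URL with the
# quotes stripped (plus anything else that falls inside the 80-character window).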
conf.get_default().log_event_callback = log_event_callback
ngrok_authtoken = getenv("NGROK_AUTHTOKEN")
if ngrok_authtoken is not None:
ngrok.set_auth_token(ngrok_authtoken)
port = getenv("PORT")
ngrok_tunnel = ngrok.connect(port)
@ngrok_ext.get("/")
async def index(request: Request, user: User = Depends(check_user_exists)):
return ngrok_renderer().TemplateResponse(
"ngrok/index.html", {"request": request, "ngrok": string5, "user": user.dict()}
)
|
py | 1a34fd88e62f8333905db52fc9f326d6fb00bd53 | import time
import os
import sys
import cPickle as pickle
import argparse
from argparse import RawTextHelpFormatter
from multiprocessing.pool import Pool
from multiprocessing.process import Process
import threading
import Queue
import msvcrt
import cProfile, pstats, StringIO
from rgkit import run as rgrun
from rgkit.game import Player
from generation import Generation
import constants
progress_q = Queue.Queue(maxsize=1)
early_end_q = Queue.Queue(maxsize=1)
class ProgressInfo:
def __init__(self, scores=[], gen=0, last_backup=0, save_file=None):
self.scores = scores
self.gen = gen
self.last_backup = last_backup
self.save_file = save_file
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Robogen execution script.",
formatter_class=RawTextHelpFormatter)
parser.add_argument("-l", "--load_file",
help="File containing a previously saved Generation.",
default=None)
parser.add_argument("-s", "--save_file",
help="File to save last Generation to.",
default=constants.default_save)
parser.add_argument("-g", "--gens", type=int,
help="Number of generations to run.",
default=1)
parser.add_argument("-p", "--processes", type=int,
help="Number of worker processes allowed to run simultaneously.",
default=constants.default_cpu_count)
return parser
def save_generation(generation, filename):
if len(filename) <= 2 or filename[len(filename)-2: len(filename)] != ".p":
filename = filename + ".p"
try:
pickle.dump(generation, open(filename, "wb"))
except IOError:
print "Could not save", filename
sys.exit(1)
def load_generation(filename):
if len(filename) <= 2 or filename[len(filename)-2: len(filename)] != ".p":
filename = filename + ".p"
try:
generation = pickle.load(open(filename, "rb"))
print "Resuming from", filename
print "Last Generation Processed:", generation.num
score_str = "Sorted Scores"
for individual in generation.population:
score_str = score_str + " : {0}".format(individual.score)
print score_str
return generation
except IOError:
print "Could not open", filename + ",", "returning new Generation"
generation = Generation()
generation.populate()
return generation
def initial_score_individuals(args):
individual = args[0]
gen = args[1]
options = rgrun.Options()
options.headless = True
options.quiet = 10
options.n_of_games = constants.games_per_scoring
individual.score = 0
players = [Player(robot=individual.get_robot()), None]
folder = os.getcwd()
folder = os.path.join(folder, "rgkit", "bots")
# AS PLAYER 1
# Play against the coded bots
for file_name in os.listdir(folder):
loc_path = os.path.join("rgkit", "bots", file_name)
#print "\t\tOpponent:", file_name
try:
players[1] = Player(file_name=loc_path)
except IOError:
print "Error opening", loc_path
sys.exit(1)
results = rgrun.Runner(players=players, options=options).run()
individual.score += sum(p1 > p2 for p1, p2 in results)
# Play against other elites in generation
is_elite = (gen.population.index(individual) < constants.elite_size)
if is_elite:
individual.score += 1 # Free win for being in elite
for elite_idx in range(constants.elite_size):
if individual is not gen.population[elite_idx]:
players[1] = Player(name="individual",
robot=gen.population[elite_idx].get_robot())
results = rgrun.Runner(players=players, options=options).run()
individual.score += sum(p1 > p2 for p1, p2 in results)
else:
# No free win for non-elite contenders
for elite_idx in range(constants.elite_size):
players[1] = Player(name="individual",
robot=gen.population[elite_idx].get_robot())
results = rgrun.Runner(players=players, options=options).run()
individual.score += sum(p1 > p2 for p1, p2 in results)
# AS PLAYER 2
players = [None, Player(robot=individual.get_robot())]
# Play against the coded bots
for file_name in os.listdir(folder):
loc_path = os.path.join("rgkit", "bots", file_name)
#print "\t\tOpponent:", file_name
try:
players[0] = Player(file_name=loc_path)
except IOError:
print "Error opening", loc_path
sys.exit(1)
results = rgrun.Runner(players=players, options=options).run()
individual.score += sum(p2 > p1 for p1, p2 in results)
# Play against other elites in generation
is_elite = (gen.population.index(individual) < constants.elite_size)
if is_elite:
individual.score += 1 # Free win for being in elite
for elite_idx in range(constants.elite_size):
if individual is not gen.population[elite_idx]:
players[0] = Player(name="individual",
robot=gen.population[elite_idx].get_robot())
results = rgrun.Runner(players=players, options=options).run()
individual.score += sum(p2 > p1 for p1, p2 in results)
else:
# No free win for non-elite contenders
for elite_idx in range(constants.elite_size):
players[0] = Player(name="individual",
robot=gen.population[elite_idx].get_robot())
results = rgrun.Runner(players=players, options=options).run()
individual.score += sum(p2 > p1 for p1, p2 in results)
return individual.score
def break_ties(args):
"""Breaks ties between a group of Individuals by pitting them against each other.
In case of an absolute tie, the 'most elite' is favored.
This fills half of a matrix of scores. If we know A's score in (A vs B), then
we can determine B's score in (B vs A) without making them compete a second time."""
individual = args[0]
tied_individuals = args[1]
individual_idx = args[2]
options = rgrun.Options()
options.headless = True
options.quiet = 10
options.n_of_games = constants.games_per_scoring
players = [Player(robot=individual.get_robot()), None]
results = []
# AS PLAYER 1
# Play against other elites in generation
for x in range(individual_idx+1, len(tied_individuals)):
opponent = tied_individuals[x]
players[1] = Player(name="individual", robot=opponent.get_robot())
result = rgrun.Runner(players=players, options=options).run()
results.append(sum(p1 >= p2 for p1, p2 in result))
# AS PLAYER 2
players = [None, Player(robot=individual.get_robot())]
# Play against other elites in generation
for x in range(individual_idx+1, len(tied_individuals)):
opponent = tied_individuals[x]
players[0] = Player(name="individual", robot=opponent.get_robot())
result = rgrun.Runner(players=players, options=options).run()
results.append(sum(p2 >= p1 for p1, p2 in result))
return results
def worker(args):
# Init
gen = None
last_backup = 0
if args.load_file is not None:
gen = load_generation(args.load_file)
last_backup = gen.num
gen = gen.propagate()
else:
gen = Generation()
gen.populate()
save_file = args.save_file
# For each generation
for gen_num in range(1, args.gens+1):
# INITIAL SCORING
# Individual VS Elites and Coded Bots
individual_pool = Pool(processes=args.processes)
scores = individual_pool.map(initial_score_individuals,
[(x, gen) for x in gen.population])
sorted_scores = []
individual_pool.close()
individual_pool.join()
for x in range(len(gen.population)):
gen.population[x].score = scores[x]
sorted_scores.append(gen.population[x].score)
sorted_scores.sort()
sorted_scores.reverse()
# ELITE SCORING AND SORTING
# Break Ties that cross the ELITE/NON-ELITE cutoff
num_elites = constants.elite_size
if sorted_scores[num_elites-1] == sorted_scores[num_elites]:
tie_score = sorted_scores[num_elites]
tied_individuals = []
for individual in gen.population:
if individual.score == tie_score:
tied_individuals.append(individual)
# Break The Ties
individual_pool = Pool(processes=args.processes)
partial_scores = individual_pool.map(break_ties,
[(tied_individuals[x], tied_individuals, x)
for x in range(len(tied_individuals))])
individual_pool.close()
individual_pool.join()
# New scores are in range [tie_score, tie_score+1)
fill_scores_from_partial(tied_individuals, partial_scores)
scores = []
for x in range(len(gen.population)):
scores.append(gen.population[x].score)
gen.sort_by_score()
# Clobber if necessary
try:
progress_q.get_nowait()
except Queue.Empty:
pass
# If work is done or early stop is requested: save, inform, finish
if gen_num == args.gens or not early_end_q.empty():
progress_q.put(ProgressInfo(scores, gen.num, last_backup, save_file))
save_generation(gen, save_file)
break
# Otherwise: inform and move to next generation
else:
if (gen_num % constants.backup_frequency) == (constants.backup_frequency-1):
save_generation(gen, constants.default_backup)
last_backup = gen.num
progress_q.put(ProgressInfo(scores, gen.num, last_backup))
gen = gen.propagate()
def fill_scores_from_partial(tied_individuals, partial_scores):
"""Creates a full score matrix from a half-full one.
Each Individual is pitted (constants.games_per_scoring) times against all other
Individuals that come after the Individual in tied_individuals. This fills half of a
matrix such that (A vs B) is known, but (B vs A) is unknown. However, (B vs A) is
just (constants.games_per_scoring - (A vs B)).
This is only to be used in tandem with break_ties()."""
base_score = tied_individuals[0].score
num_individuals = len(tied_individuals)
for x in range(num_individuals):
sub_score = 0
# Add in known scores
for y in range((num_individuals - 1 - x)):
# As Player 1
sub_score += partial_scores[x][y]
# As Player 2
sub_score += partial_scores[x][y + (num_individuals-1-x)]
        # Add in unknown scores. Note: partial_scores is not actually a half-empty
# matrix. It is an array of arrays of decreasing size. Thus the complicated
# indices.
for y in range(x):
# As Player 1
sub_score += (constants.games_per_scoring - partial_scores[y][x - (y+1)])
# As Player 2
sub_score += (constants.games_per_scoring -
partial_scores[y][x - (y+1) + (num_individuals-1-y)])
# New score is 'normalized' to be 0 <= x < 1
sub_score = (sub_score / (2.0 * len(tied_individuals) * constants.games_per_scoring))
tied_individuals[x].score = base_score + sub_score
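# Illustrative example of the partial_scores layout (hypothetical numbers): with
# three tied individuals A, B, C and constants.games_per_scoring == 2,
# break_ties() yields
#   partial_scores[0] = [A vs B (P1), A vs C (P1), A vs B (P2), A vs C (P2)]
#   partial_scores[1] = [B vs C (P1), B vs C (P2)]
#   partial_scores[2] = []
# For B (x == 1) the missing results against A are recovered above as
# games_per_scoring - partial_scores[0][0] and games_per_scoring - partial_scores[0][2].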
def main():
# Get args: load or new; number of generations; TODO: More options
args = get_arg_parser().parse_args()
clear()
if args.load_file is not None:
# The printout for this is in the load_generation method
pass
else:
print "Starting evolution from scratch . . ."
work_thread = threading.Thread(target=worker, args=(args,))
work_thread.start()
gen = 0
scores = []
progress = None
# Status Printing Loop until work finished or SPACE pressed
ended_naturally = False
while not msvcrt.kbhit() or msvcrt.getch() != " ":
# Each finished generation fills the progress_q
try:
progress = progress_q.get_nowait()
if progress.save_file is not None:
ended_naturally = True
break
except Queue.Empty:
pass
print_status(progress)
time.sleep(1)
# If SPACE was pressed, inform work thread through early_end_q
if not ended_naturally:
progress = ProgressInfo()
clear()
print "Gracefully exiting and saving progress.\nPlease wait",
early_end_q.put(ProgressInfo(save_file=True))
# Wait for work thread to save progress
while progress.save_file is None:
try:
print ".",
progress = progress_q.get_nowait()
print "" # Get rid of a left over space from the above printout
except Queue.Empty:
time.sleep(1)
work_thread.join()
print_status(progress)
spinner = 0
def print_status(progress):
if progress is None:
return
clear()
if progress.save_file is None:
global spinner
print "Processing Generations ",
if spinner == 0:
print "-"
elif spinner == 1:
print "\\"
elif spinner == 2:
print "|"
elif spinner == 3:
print "/"
spinner = (spinner + 1) % 4
else:
print "Execution finished. Progress saved in", progress.save_file
print "Last Generation Processed:", progress.gen,
print "\tLast Backup Generation:", progress.last_backup
max_score = (2.0 * constants.games_per_scoring *
(constants.elite_size + constants.num_coded_opponents)) + 1.0
scores = []
score_str = " Scores"
for score in progress.scores:
scores.append((score / max_score) * 100.0)
for score in scores:
score_str = score_str + " : {:.1f}".format(score)
score_str = score_str + "\nSorted Scores"
scores.sort()
scores.reverse()
for score in scores:
score_str = score_str + " : {:.1f}".format(score)
print score_str
if progress.save_file is None:
print "Press Spacebar to save progress and exit."
def clear():
os.system('cls' if os.name == 'nt' else 'clear')
#pass
if __name__ == '__main__':
main()
|
py | 1a34fd9629e249acbd71039735bc531ad57f4b4d | from enum import Enum
import numpy as np
from actions import Direction
class Car():
def __init__(self, car):
self.x = int(car['x'])
self.y = int(car['y'])
self.health = int(car['health'])
self.resources = int(car['resources'])
self.collided = bool(car['collided'])
self.killed = bool(car['killed'])
self.state = State(car['state'])
class StateType(Enum):
STOPPED = 1
MOVING = 2
class State():
def __init__(self, state):
if isinstance(state, dict):
self.state_type = StateType[list(state.keys())[0]]
self.direction = Direction[state[list(state.keys())[0]]]
else:
self.state_type = StateType[state]
self.direction = None
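        # Assumption inferred from the branches above (not from any API docs):
        # the game engine sends either a bare state name such as "STOPPED" or a
        # single-key mapping such as {"MOVING": "NORTH"}.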
class ItemType(Enum):
PRODUCER = 1
RESOURCE = 2
BASE = 3
class CellType(Enum):
OPEN = 1
WALL = 2
class Cell():
def __init__(self, cell_type, x, y):
self.cell_type = cell_type
self.x = x
self.y = y
self.items = []
def set_items(self, items):
self.items = items
def is_wall(self):
return self.cell_type == CellType.WALL
def has_base(self):
for i in self.items:
if i.is_base():
return True
return False
def has_producer(self):
for i in self.items:
if i.is_producer():
return True
return False
def has_resource(self):
for i in self.items:
if i.is_resource():
return True
return False
class Item():
def __init__(self, item_type):
self.item_type = item_type
def is_resource(self):
return self.item_type == ItemType.RESOURCE
def is_producer(self):
return self.item_type == ItemType.PRODUCER
def is_base(self):
return self.item_type == ItemType.BASE
class Observation():
def __init__(self, previous):
self.previous_observation = previous
def parse(self, game_state, team_id):
self.map = game_state['map']
self.cars = {str(k): Car(v) for k, v in game_state['cars'].items()}
self.team = game_state['teams'][str(team_id)]
self.score = self.team['score']
self.car = self.cars[str(team_id)]
self.cells = []
y = 0
for r in self.map['cells']:
x = 0
# row = []
# self.cells.append(row)
for c in r:
cell = Cell(CellType[c['block']], x, y)
items = []
for i in c['items']:
if isinstance(i, dict):
item_type = ItemType[list(i.keys())[0]]
else:
item_type = ItemType[i]
items.append(Item(item_type))
# row.append(cell)
self.cells.append(cell)
x += 1
y += 1
self.size_x = x
self.size_y = y
def get_car_at(self, cell):
for car in self.cars.values():
if car.x == cell.x and car.y == cell.y:
return car
return None
def to_rl(self):
o = []
o.append(self.car.health)
o.append(self.car.resources)
o.append(self.car.collided)
o.append(self.car.killed)
c = []
for cell in self.cells:
c.append(int(cell.is_wall()))
c.append(int(cell.has_base()))
c.append(int(cell.has_producer()))
c.append(int(cell.has_resource()))
car = self.get_car_at(cell)
if car is not None:
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.NORTH))
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.SOUTH))
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.EAST))
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.WEST))
c.append(car.resources)
c.append(car.health)
c.append(car.collided)
else:
for _ in range(7):
c.append(0)
cell_index = self.car.x + self.car.y*self.size_y
# Center observations on the car
o += c[cell_index:] + c[:cell_index]
o = np.array(o, dtype=np.int8)
return o
# not used
def sample():
raise NotImplementedError('Observation.sample called! Not really tested/implemented...')
o = []
o.append(np.random.randint(0, 4))
o.append(np.random.randint(0, 100))
nb_cells = 20 * 24 # XXX
car_cell_index = np.random.randint(0, nb_cells) # XXX car can be on a wall
for i in range(nb_cells):
if i == car_cell_index:
o.append(1)
else:
o.append(0)
has_wall = np.random.randint(0, 2)
o.append(has_wall)
if has_wall == 0:
has_base = np.random.randint(0, 2)
has_producer = np.random.randint(0, 2)
has_resource = np.random.randint(0, 2)
else:
has_base = 0
has_producer = 0
has_resource = 0
o.append(has_base)
o.append(has_producer)
o.append(has_resource)
return o
def to_reward(self):
reward = 0
if self.car.state.state_type != StateType.MOVING:
reward -= 1
if self.previous_observation is not None:
reward += (self.score - self.previous_observation.score) * 100
# resources_bonus = self.car.resources - self.previous_observation.car.resources
# if resources_bonus > 0:
# reward += resources_bonus * 10
if self.car.collided: # XXX tune penalty depending on health?
reward -= 2
if self.car.killed:
reward += -10 - self.car.resources*100
return reward
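# --- Usage sketch (illustrative only) ----------------------------------------
# A hypothetical sketch of how this module is typically driven from a game loop;
# `game_state` and `team_id` are assumed to come from the game server and are not
# defined in this file.
#
#   obs = Observation(previous=None)
#   obs.parse(game_state, team_id)
#   state_vector = obs.to_rl()    # flat np.int8 vector for an RL agent
#   reward = obs.to_reward()      # scalar reward from score/collision/kill terms
#   next_obs = Observation(previous=obs)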
|
py | 1a34fdb9e75a7978cd39cb52f1d28ab1db413109 | #!/usr/bin/env python3
import copy
import sys
class Board:
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
self.board = []
self.initial_fill()
def initial_fill(self):
for i in range(self.rows):
self.board.append(self.cols * [-1])
def is_safe(self, x, y):
"""
Check if coordinates are in range
:param x:
:param y:
:return: boolean
"""
return (0 <= x < self.rows) and (0 <= y < self.cols) and (self.board[x][y] == -1)
def __str__(self):
return "\n".join([", ".join(["%2i" % i for i in row]) for row in self.board])
class BoardKT(Board):
# move_x, move_y
moves = ((-1, 2), (-2, 1), (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1))
def __init__(self, rows, cols):
Board.__init__(self, rows, cols)
self.bs_board = copy.deepcopy(self.board)
self.bs_counter = 0
def solve_kt(self, start_x=0, start_y=0):
self.board[start_x][start_y] = 0
return self.fill_kt(start_x, start_y, 1)
def fill_kt(self, x, y, counter):
if counter > self.bs_counter: # find the closest solution
self.bs_counter = counter
self.bs_board = copy.deepcopy(self.board)
if counter == self.rows * self.cols:
return True
for move in self.moves: # iterate over all possible positions
move_x, move_y = x + move[0], y + move[1]
if self.is_safe(move_x, move_y):
self.board[move_x][move_y] = counter # set current position
# check next move
if self.fill_kt(move_x, move_y, counter + 1):
return True
else:
self.board[move_x][move_y] = -1 # backtrack
return False
def __str__(self):
return "\n".join([", ".join(["%2i" % i for i in row]) for row in self.bs_board])
def exit_error(message):
exit("ERROR: " + message)
if __name__ == "__main__":
if len(sys.argv) < 3:
exit_error(" usage is %s N M" % (sys.argv[0]))
start_x, start_y = 0, 0
if len(sys.argv) == 5:
start_x, start_y = int(sys.argv[3]), int(sys.argv[4])
N = int(sys.argv[1])
M = int(sys.argv[2])
print("Board size: %d X %d" % (N, M))
# validate http://faculty.olin.edu/~sadams/DM/ktpaper.pdf
if (N < 3 and M < 2) or (N < 2 and M < 3):
exit_error("Board is not wide enough to construct a tour")
board = BoardKT(N, M)
if board.solve_kt(start_x, start_y) is False:
print("Could not walk all the board")
print("Walked: %d" % board.bs_counter)
print(board)
|
py | 1a34fdd27d652d68494110c0186238667b56bdfd | from copy import deepcopy
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy
import scvi
from anndata import AnnData
from pyro import clear_param_store
from pyro.nn import PyroModule
from scvi import _CONSTANTS
from scvi.data._anndata import _setup_anndata, get_from_registry
from scvi.model.base import BaseModelClass, PyroSampleMixin, PyroSviTrainMixin
from scvi.utils import setup_anndata_dsp
from cell2location.models._cell2location_module import (
LocationModelLinearDependentWMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel,
)
from cell2location.models.base._pyro_base_loc_module import Cell2locationBaseModule
from cell2location.models.base._pyro_mixin import PltExportMixin, QuantileMixin
from cell2location.utils import select_slide
class Cell2location(QuantileMixin, PyroSampleMixin, PyroSviTrainMixin, PltExportMixin, BaseModelClass):
"""
Cell2location model. User-end model class. See Module class for description of the model (incl. math).
Parameters
----------
adata
spatial AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.
cell_state_df
pd.DataFrame with reference expression signatures for each gene (rows) in each cell type/population (columns).
use_gpu
Use the GPU?
**model_kwargs
Keyword args for :class:`~cell2location.models.LocationModelLinearDependentWMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel`
Examples
--------
TODO add example
>>>
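>>> # A minimal, hypothetical workflow; `adata_vis` (spatial AnnData) and
>>> # `inf_aver` (reference signature DataFrame) are placeholder names, not
>>> # objects defined in this file.
>>> # Cell2location.setup_anndata(adata_vis, batch_key="sample")
>>> # mod = Cell2location(adata_vis, cell_state_df=inf_aver, N_cells_per_location=30)
>>> # mod.train(max_epochs=30000)
>>> # adata_vis = mod.export_posterior(adata_vis, sample_kwargs={"num_samples": 1000})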
"""
def __init__(
self,
adata: AnnData,
cell_state_df: pd.DataFrame,
model_class: Optional[PyroModule] = None,
detection_mean_per_sample: bool = False,
detection_mean_correction: float = 1.0,
**model_kwargs,
):
# in case any other model was created before that shares the same parameter names.
clear_param_store()
if not np.all(adata.var_names == cell_state_df.index):
raise ValueError("adata.var_names should match cell_state_df.index, find intersecting variables/genes first")
# add index for each cell (provided to pyro plate for correct minibatching)
adata.obs["_indices"] = np.arange(adata.n_obs).astype("int64")
scvi.data.register_tensor_from_anndata(
adata,
registry_key="ind_x",
adata_attr_name="obs",
adata_key_name="_indices",
)
super().__init__(adata)
if model_class is None:
model_class = LocationModelLinearDependentWMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel
self.cell_state_df_ = cell_state_df
self.n_factors_ = cell_state_df.shape[1]
self.factor_names_ = cell_state_df.columns.values
if not detection_mean_per_sample:
# compute expected change in sensitivity (m_g in V1 or y_s in V2)
sc_total = cell_state_df.sum(0).mean()
sp_total = get_from_registry(self.adata, _CONSTANTS.X_KEY).sum(1).mean()
get_from_registry(adata, _CONSTANTS.BATCH_KEY)
self.detection_mean_ = (sp_total / model_kwargs.get("N_cells_per_location", 1)) / sc_total
self.detection_mean_ = self.detection_mean_ * detection_mean_correction
model_kwargs["detection_mean"] = self.detection_mean_
else:
# compute expected change in sensitivity (m_g in V1 and y_s in V2)
sc_total = cell_state_df.sum(0).mean()
sp_total = get_from_registry(self.adata, _CONSTANTS.X_KEY).sum(1)
batch = get_from_registry(self.adata, _CONSTANTS.BATCH_KEY).flatten()
sp_total = np.array([sp_total[batch == b].mean() for b in range(self.summary_stats["n_batch"])])
self.detection_mean_ = (sp_total / model_kwargs.get("N_cells_per_location", 1)) / sc_total
self.detection_mean_ = self.detection_mean_ * detection_mean_correction
model_kwargs["detection_mean"] = self.detection_mean_.reshape((self.summary_stats["n_batch"], 1)).astype(
"float32"
)
detection_alpha = model_kwargs.get("detection_alpha", None)
if detection_alpha is not None:
if type(detection_alpha) is dict:
batch_mapping = self.adata.uns["_scvi"]["categorical_mappings"]["_scvi_batch"]["mapping"]
self.detection_alpha_ = pd.Series(detection_alpha)[batch_mapping]
model_kwargs["detection_alpha"] = self.detection_alpha_.values.reshape(
(self.summary_stats["n_batch"], 1)
).astype("float32")
self.module = Cell2locationBaseModule(
model=model_class,
n_obs=self.summary_stats["n_cells"],
n_vars=self.summary_stats["n_vars"],
n_factors=self.n_factors_,
n_batch=self.summary_stats["n_batch"],
cell_state_mat=self.cell_state_df_.values.astype("float32"),
**model_kwargs,
)
self._model_summary_string = f'cell2location model with the following params: \nn_factors: {self.n_factors_} \nn_batch: {self.summary_stats["n_batch"]} '
self.init_params_ = self._get_init_params(deepcopy(locals()))
@staticmethod
@setup_anndata_dsp.dedent
def setup_anndata(
adata: AnnData,
batch_key: Optional[str] = None,
labels_key: Optional[str] = None,
layer: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
copy: bool = False,
) -> Optional[AnnData]:
"""
%(summary)s.
Parameters
----------
%(param_adata)s
%(param_batch_key)s
%(param_labels_key)s
%(param_layer)s
%(param_cat_cov_keys)s
%(param_cont_cov_keys)s
%(param_copy)s
Returns
-------
%(returns)s
"""
return _setup_anndata(
adata,
batch_key=batch_key,
labels_key=labels_key,
layer=layer,
categorical_covariate_keys=categorical_covariate_keys,
continuous_covariate_keys=continuous_covariate_keys,
copy=copy,
)
def train(
self, max_epochs: int = 30000, batch_size: int = None, train_size: float = 1, lr: float = 0.002, **kwargs
):
"""Train the model with useful defaults
Parameters
----------
max_epochs
Number of passes through the dataset. If `None`, defaults to
`np.min([round((20000 / n_cells) * 400), 400])`
train_size
Size of training set in the range [0.0, 1.0]. Use all data points in training because
we need to estimate cell abundance at all locations.
batch_size
Minibatch size to use during training. If `None`, no minibatching occurs and all
data is copied to device (e.g., GPU).
lr
Optimiser learning rate (default optimiser is :class:`~pyro.optim.ClippedAdam`).
Specifying optimiser via plan_kwargs overrides this choice of lr.
kwargs
Other arguments to scvi.model.base.PyroSviTrainMixin().train() method
"""
kwargs["max_epochs"] = max_epochs
kwargs["batch_size"] = batch_size
kwargs["train_size"] = train_size
kwargs["lr"] = lr
super().train(**kwargs)
def export_posterior(
self,
adata,
sample_kwargs: Optional[dict] = None,
export_slot: str = "mod",
add_to_obsm: list = ["means", "stds", "q05", "q95"],
):
"""
Summarise posterior distribution and export results (cell abundance) to anndata object:
1. adata.obsm: Estimated cell abundance as pd.DataFrames for each posterior distribution summary `add_to_obsm`,
posterior mean, sd, 5% and 95% quantiles (['means', 'stds', 'q05', 'q95']).
If export to adata.obsm fails with error, results are saved to adata.obs instead.
2. adata.uns: Posterior of all parameters, model name, date,
cell type names ('factor_names'), obs and var names.
Parameters
----------
adata
anndata object where results should be saved
sample_kwargs
arguments for self.sample_posterior (generating and summarising posterior samples), namely:
num_samples - number of samples to use (Default = 1000).
batch_size - data batch size (keep low enough to fit on GPU, default 2048).
use_gpu - use gpu for generating samples?
export_slot
adata.uns slot where to export results
add_to_obsm
posterior distribution summary to export in adata.obsm (['means', 'stds', 'q05', 'q95']).
Returns
-------
"""
sample_kwargs = sample_kwargs if isinstance(sample_kwargs, dict) else dict()
# generate samples from posterior distributions for all parameters
# and compute mean, 5%/95% quantiles and standard deviation
self.samples = self.sample_posterior(**sample_kwargs)
# TODO use add_to_obsm to determine which quantiles need to be computed,
# and if means and stds are not in the list - use quantile methods rather than sampling posterior
# export posterior distribution summary for all parameters and
# annotation (model, date, var, obs and cell type names) to anndata object
adata.uns[export_slot] = self._export2adata(self.samples)
# add estimated cell abundance as dataframe to obsm in anndata
# first convert np.arrays to pd.DataFrames with cell type and observation names
# data frames contain mean, 5%/95% quantiles and standard deviation, denoted by a prefix
for k in add_to_obsm:
sample_df = self.sample2df_obs(
self.samples,
site_name="w_sf",
summary_name=k,
name_prefix="cell_abundance",
)
try:
adata.obsm[f"{k}_cell_abundance_w_sf"] = sample_df.loc[adata.obs.index, :]
except ValueError:
# Catching weird error with obsm: `ValueError: value.index does not match parent’s axis 1 names`
adata.obs[sample_df.columns] = sample_df.loc[adata.obs.index, :]
return adata
def plot_spatial_QC_across_batches(self):
"""QC plot: compare total RNA count with estimated total cell abundance and detection sensitivity."""
adata = self.adata
# get batch key and the list of samples
batch_key = self.adata.uns["_scvi"]["categorical_mappings"]["_scvi_batch"]["original_key"]
samples = adata.obs[batch_key].unique()
# figure out plot shape
ncol = len(samples)
nrow = 3
fig, axs = plt.subplots(nrow, ncol, figsize=(1 + 4 * ncol, 1 + 4 * nrow))
if ncol == 1:
axs = axs.reshape((nrow, 1))
# compute total counts
# find data slot
x_dict = self.adata.uns["_scvi"]["data_registry"]["X"]
if x_dict["attr_name"] == "X":
use_raw = False
else:
use_raw = True
if x_dict["attr_name"] == "layers":
layer = x_dict["attr_key"]
else:
layer = None
# get data
if layer is not None:
x = adata.layers[layer]
else:
if not use_raw:
x = adata.X
else:
x = adata.raw.X
# compute total counts per location
cell_type = "total RNA counts"
adata.obs[cell_type] = np.array(x.sum(1)).flatten()
# figure out colour map scaling
vmax = np.quantile(adata.obs[cell_type].values, 0.992)
# plot, iterating across samples
for i, s in enumerate(samples):
sp_data_s = select_slide(adata, s, batch_key=batch_key)
scanpy.pl.spatial(
sp_data_s,
cmap="magma",
color=cell_type,
size=1.3,
img_key="hires",
alpha_img=1,
vmin=0,
vmax=vmax,
ax=axs[0, i],
show=False,
)
axs[0, i].title.set_text(cell_type + "\n" + s)
cell_type = "Total cell abundance (sum_f w_sf)"
adata.obs[cell_type] = adata.uns["mod"]["post_sample_means"]["w_sf"].sum(1).flatten()
# figure out colour map scaling
vmax = np.quantile(adata.obs[cell_type].values, 0.992)
# plot, iterating across samples
for i, s in enumerate(samples):
sp_data_s = select_slide(adata, s, batch_key=batch_key)
scanpy.pl.spatial(
sp_data_s,
cmap="magma",
color=cell_type,
size=1.3,
img_key="hires",
alpha_img=1,
vmin=0,
vmax=vmax,
ax=axs[1, i],
show=False,
)
axs[1, i].title.set_text(cell_type + "\n" + s)
cell_type = "RNA detection sensitivity (y_s)"
adata.obs[cell_type] = adata.uns["mod"]["post_sample_q05"]["detection_y_s"]
# figure out colour map scaling
vmax = np.quantile(adata.obs[cell_type].values, 0.992)
# plot, iterating across samples
for i, s in enumerate(samples):
sp_data_s = select_slide(adata, s, batch_key=batch_key)
scanpy.pl.spatial(
sp_data_s,
cmap="magma",
color=cell_type,
size=1.3,
img_key="hires",
alpha_img=1,
vmin=0,
vmax=vmax,
ax=axs[2, i],
show=False,
)
axs[2, i].title.set_text(cell_type + "\n" + s)
fig.tight_layout(pad=0.5)
return fig
|
py | 1a34ffb37f061b23cbc4e1b194fc5719df09f314 | import json
from packlib.base import ProxmoxAction
class NodesNodeCertificatesInfoAction(ProxmoxAction):
"""
Get information about node's certificates.
"""
def run(self, node, profile_name=None):
super().run(profile_name)
# Only include non None arguments to pass through to proxmox api.
proxmox_kwargs = {}
for api_arg in [
["node", node, "string"],
]:
if api_arg[1] is None:
continue
if "[n]" in api_arg[0]:
unit_list = json.loads(api_arg[1])
for i, v in enumerate(unit_list):
proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
else:
if api_arg[2] == "boolean":
api_arg[1] = int(api_arg[1])
proxmox_kwargs[api_arg[0]] = api_arg[1]
return self.proxmox.get(f"nodes/{node}/certificates/info", **proxmox_kwargs)
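# Usage sketch (hypothetical): how the framework is expected to invoke this action.
# The constructor arguments of ProxmoxAction are defined elsewhere in packlib, and
# "pve-node-01" / "default" are placeholder values.
#
#   action = NodesNodeCertificatesInfoAction()
#   info = action.run(node="pve-node-01", profile_name="default")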
|
py | 1a34ffd344ace7b42d9a5ff99d4bd4ff1246faf3 | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.customers_home_page, name="customers-home"),
url(r'^login/$',views.login_page, name="customers-login"),
]
|
py | 1a34ffe549a5467ff9a088d61d3a996e8eca20e7 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 21:24:17 2018
@author: Zhaoyi.Shen
"""
import sys
sys.path.append('/home/z1s/py/lib/')
from lanczos_filter import lanczos_filter
import numpy as np
import scipy as sp
from scipy.signal import butter, lfilter, filtfilt
def lfca(x, cutoff, truncation, scale, **kwargs):
if x.ndim!=2:
return
if 'covtot' in kwargs.keys():
covtot = kwargs['covtot']
else:
covtot = np.cov(x,rowvar=False)
(n,p) = x.shape
if covtot.shape!=(p,p):
return
# center data
x = x - np.nanmean(x,0)[np.newaxis,...]
xs = x * np.transpose(scale)
# eigendecomposition of covariance matrix
#scale.shape = (1,p)
covtot = np.transpose(scale)*covtot*scale
pcvec, evl, rest = peigs(covtot, min(n-1,p))
trcovtot = np.trace(covtot)
#scale.shape = (p)
# percent of total sample variation accounted for by each EOF
pvar = evl/trcovtot*100
# principal component time series
pcs = np.dot(xs, pcvec)
# return EOFs in original scaling as patterns (row vectors)
eof = np.transpose(pcvec)/np.transpose(scale)
# truncation of EOFs
ntr = truncation
# whitening transformation
f = np.sqrt(np.squeeze(evl)[0:ntr])
# get transformation matrices
s = np.dot(pcvec[:,0:ntr], np.diag(1./f))
sadj = np.dot(np.diag(f), np.transpose(pcvec[:,0:ntr]))
# filter data matrix
b,a = butter(5,1./cutoff,btype='low')
t = np.arange(1,n+1)
#t.shape = (1,n)
#t = np.transpose(t)
x_f = xs.copy()
for i in range(xs.shape[1]):
p = np.polyfit(t,xs[:,i],1)
tmp = xs[t-1,i]-p[0]*t-p[1]
tmp1 = np.concatenate((np.flipud(tmp),tmp,np.flipud(tmp)))
#tmp_filt = filtfilt(b,a,tmp)
tmp_filt = lanczos_filter(tmp1,1,1./cutoff)[0]
x_f[:,i] = tmp_filt[int(np.size(tmp_filt)/3):int(2*np.size(tmp_filt)/3)]+p[0]*t+p[1]
#x_f[:,i] = tmp_filt+p[0]*t+p[1]
# whiten variables
y = np.dot(x_f, s)
# slow covariance matrix of whitened variables
gamma = np.cov(y,rowvar=False)
# SVD of slow variance matrix
dummy, r, v = csvd(gamma)
# weight vectors and patterns
weights = scale * np.dot(s, v)
lfps = np.dot(np.transpose(v), sadj)/np.transpose(scale)
# choose signs of patterns, weights, eofs, and pcs
#scale.shape = (1,p)
for j in range(lfps.shape[0]):
if np.dot(lfps[j,:][np.newaxis,...], scale)<0:
lfps[j,:] = -lfps[j,:]
weights[:,j] = -weights[:,j]
for j in range(eof.shape[0]):
if np.dot(eof[j,:][np.newaxis,...], scale)<0:
eof[j,:] = -eof[j,:]
pcs[:,j] = -pcs[:,j]
#scale.shape = (p)
# low-frequency components
xs = xs/np.transpose(scale)
lfcs = np.dot(xs, weights)
# slow covariance of untruncated state space
cov_slow = np.cov(x_f,rowvar=False)
trcovslow = np.trace(cov_slow)
w = weights/scale
p = lfps*np.transpose(scale)
pw_diag = np.diag(np.dot(p,w))
slow_var = np.diag(np.dot(np.dot(p,cov_slow),w))/pw_diag
tot_var = np.diag(np.dot(np.dot(p,covtot),w))/pw_diag
pcvec_diag = np.diag(np.dot(np.transpose(pcvec),pcvec))
slow_var_eofs = np.diag(np.dot(np.dot(np.transpose(pcvec),cov_slow),pcvec))/pcvec_diag
tot_var_eofs = np.diag(np.dot(np.dot(np.transpose(pcvec),covtot),pcvec))/pcvec_diag
# slow variance and total variance in each LFC
pvar_slow = slow_var/trcovslow*100
pvar_lfc = tot_var/trcovtot*100
r_eofs = slow_var_eofs/tot_var_eofs
pvar_slow_eofs = slow_var_eofs/trcovslow*100
return lfcs, lfps, weights, r, pvar, pcs, eof, ntr, pvar_slow, pvar_lfc, r_eofs, pvar_slow_eofs
def peigs(a, rmax):
(m,n) = a.shape
if rmax>min(m,n):
rmax = min(m,n)
if rmax<min(m,n)/10.:
(d,v) = sp.sparse.linalg.eigs(a, rmax)
else:
(d,v) = np.linalg.eig(a)
if d.size>max(d.shape):
d = np.diag(d)
# ensure that eigenvalues are monotonically decreasing
i = np.argsort(-d)
d = -np.sort(-d)
v = v[:,i]
# estimate number of positive eigenvalues of a
d_min = max(d)*max(m,n)*np.spacing(1)
r = np.sum(d>d_min)
# discard eigenpairs with eigenvalues that are close to or less than zero
d = d[:r]
v = v[:,:r]
d = d[:]
return v, d, r
def csvd(a):
(m,n) = a.shape
if m>=n:
(u,s,v) = np.linalg.svd(a,0)
v = np.transpose(v)
else:
(v,s,u) = np.linalg.svd(a.transpose(),0)
u = np.transpose(u)
return u, s, v
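# --- Usage sketch (illustrative, not from the original analysis) --------------
# `x` is an (n_time, n_space) anomaly matrix and `scale` holds per-column
# (e.g. area) weights; the cutoff/truncation values below are placeholders.
#
#   lfcs, lfps, weights, r, pvar, pcs, eof, ntr, \
#       pvar_slow, pvar_lfc, r_eofs, pvar_slow_eofs = lfca(
#           x, cutoff=120, truncation=30, scale=np.ones(x.shape[1]))
#   # lfcs: low-frequency component time series, lfps: corresponding patterns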
|
py | 1a35000095d08fc4949c53dd142a084948198a8d | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Comment
# class CommentForm(forms.ModelForm):
# class Meta:
# model = Comment
# fields = ('user', 'path', 'text')
class CommentForm(forms.Form):
comment = forms.CharField(
widget=forms.Textarea(attrs={"placeholder": "Your comment or reply."})
)
def __init__(self, data=None, files=None, **kwargs):
super(CommentForm, self).__init__(data, files, **kwargs)
self.helper = FormHelper()
self.helper.form_show_labels = False
self.helper.add_input(Submit('submit', 'Add Comment', css_class='btn btn-primary',)) |
py | 1a350006c33f63d8693e07603fd8d003e3df3793 | #!/usr/bin/python
import json
import os
import subprocess
device_id = os.environ.get('SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER')
if not device_id:
raise EnvironmentError('Specify SANITIZER_IOSSIM_TEST_DEVICE_IDENTIFIER to select which simulator to use.')
boot_cmd = ['xcrun', 'simctl', 'bootstatus', device_id, '-b']
subprocess.check_call(boot_cmd)
# TODO(rdar58118442): we start the simulator here, but we never tear it down
print(json.dumps({"env": {}}))
|
py | 1a350111c964c17e307adf5ec44248090526f453 | """
Author: Sijin Chen, Fudan University
Finished Date: 2021/06/04
"""
from .nn import NetworkWrapper
from collections import OrderedDict
from copy import deepcopy
from typing import Callable
import numpy as np
class Optimizer:
""" Meta class for optimizers """
def __init__(self, *args, **kwargs):
self.state_dict = OrderedDict()
def load_state_dict(self, state_dict: OrderedDict):
self.state_dict = deepcopy(state_dict)
def step(self,): raise NotImplementedError("Overwrite this!")
class lr_scheduler:
""" Meta class for adjusting optimizer learning rate """
def __init__(self, optimizer: Optimizer, *args, **kwargs):
self.optimizer = optimizer
self.state_dict = OrderedDict()
def load_state_dict(self, state_dict: OrderedDict):
self.state_dict = deepcopy(state_dict)
class SGD(Optimizer):
""" The optimizer class to update the parameters from the network """
def __init__(self,
model: NetworkWrapper,
lr: float,
momentum: float=None,
weight_decay: float=None
):
super(SGD, self).__init__()
self.model = model
self.lr = lr
self.momentum = momentum
self.weight_decay = weight_decay if weight_decay is not None else 0.
if self.momentum is not None:
for ModuleName, Layer in self.model.ModuleDict.items():
if "weight" in Layer.state_dict:
self.state_dict["{}-weight".format(ModuleName)] = 0.
if "bias" in Layer.state_dict:
self.state_dict["{}-bias".format(ModuleName)] = 0.
self.decay_rate = None
self.batch_size = None
def load_state_dict(self, state_dict: OrderedDict):
self.state_dict = deepcopy(state_dict)
def _step_without_momentum(self,):
""" Update the layers without momentum
Note:
Since the update without momentum is a special version of the momentum update
(momentum=0), we separate these two updating methods to accelerate training.
"""
for ModuleName, Layer in self.model.ModuleDict.items():
self.batch_size = Layer["batch_size"]
self.decay_rate = 1 - (self.weight_decay/self.batch_size)
if "weight" in Layer.state_dict:
Layer.state_dict["weight"] = Layer["weight"]*self.decay_rate - self.lr*Layer.grad["weight"]
if "bias" in Layer.state_dict:
Layer.state_dict["bias"] = Layer["bias"]*self.decay_rate - self.lr*Layer.grad["bias"]
def _step_with_momentum(self,):
""" Update the layers with momentum update:
W(t+1) = W(t) - lr*dW + momentum*(W(t) - W(t-1))
"""
for ModuleName, Layer in self.model.ModuleDict.items():
self.batch_size = Layer["batch_size"]
self.decay_rate = 1-(self.weight_decay/self.batch_size)
if "weight" in Layer.state_dict:
cache = deepcopy(Layer["weight"])
momentum = cache - self.state_dict["{}-weight".format(ModuleName)]
Layer.state_dict["weight"] = cache*self.decay_rate - self.lr*Layer.grad["weight"] + self.momentum*momentum
self.state_dict["{}-weight".format(ModuleName)] = cache
if "bias" in Layer.state_dict:
cache = deepcopy(Layer["bias"])
momentum = cache - self.state_dict["{}-bias".format(ModuleName)]
Layer.state_dict["bias"] = cache*self.decay_rate - self.lr*Layer.grad["bias"] + self.momentum*momentum
self.state_dict["{}-bias".format(ModuleName)] = cache
def step(self,):
""" We implemented two different ways of updating parameters """
if self.momentum is not None:
self._step_with_momentum()
else:
self._step_without_momentum()
class LambdaLR(lr_scheduler):
""" Using lambda function to adjust learning rate """
# always update learning rate after running the entire epoch!
def __init__(self, optimizer: Optimizer, lr_lambda: Callable, verbose: bool=False):
super(lr_scheduler, self).__init__()
self.optimizer = optimizer
self.lr_lambda = lr_lambda
self.verbose = verbose
self.epoch = -1
def step(self,):
# adjusting learning rate using the lr-lambda function
lr = self.optimizer.lr
# Update learning rate
self.optimizer.lr = self.lr_lambda(lr)
if self.verbose is True:
print("Adjusting learning rate with lr_lambda from {:.4f} to {:.4f}".format(
lr, self.optimizer.lr))
self.epoch += 1
class CosineAnnealingLR(lr_scheduler):
""" Using lambda function to adjust learning rate """
# always update learning rate after running the entire epoch!
def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float=0.,
verbose: bool=False):
super(lr_scheduler, self).__init__()
self.optimizer = optimizer
self.T_max = T_max
self.T_cur = -1
self.eta_min = eta_min
self.eta_max = None
self.verbose = verbose
def step(self,):
""" Update the learning rate using
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)
"""
lr = self.optimizer.lr
if self.T_cur == -1: self.eta_max = lr
# Cosine annealing function
new_lr = self.eta_min + 1/2*(self.eta_max-self.eta_min) * \
(1 + np.cos(self.T_cur/self.T_max*np.pi))
# Update learning rate
self.optimizer.lr = new_lr
if self.verbose is True:
print("Adjusting learning rate with cosine annealing from {:.4f} to {:.4f}".format(
lr, self.optimizer.lr))
self.T_cur += 1
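# --- Usage sketch (illustrative) -----------------------------------------------
# Assumes `model` is a NetworkWrapper built elsewhere in this package and that a
# training loop fills each layer's gradients before optimizer.step() is called;
# the hyperparameter values are placeholders.
#
#   optimizer = SGD(model, lr=0.1, momentum=0.9, weight_decay=1e-4)
#   scheduler = CosineAnnealingLR(optimizer, T_max=50, verbose=True)
#   for epoch in range(50):
#       ...                # forward/backward pass populates Layer.grad
#       optimizer.step()   # SGD update (optionally with momentum/weight decay)
#       scheduler.step()   # cosine-annealed learning-rate adjustment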
|
py | 1a350120c971bb4447111915f4905c0bb0abab1d | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .models import linformer_roberta # noqa
|
py | 1a35014f8b12266711a7bee46ff8e7927fb75e02 | # USAGE
# python cluster_faces.py --encodings encodings.pickle
# import the necessary packages
from sklearn.cluster import DBSCAN
from imutils import build_montages
import numpy as np
import argparse
import pickle
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required=True,
help="path to serialized db of facial encodings")
ap.add_argument("-j", "--jobs", type=int, default=-1,
help="# of parallel jobs to run (-1 will use all CPUs)")
args = vars(ap.parse_args())
# load the serialized face encodings + bounding box locations from
# disk, then extract the set of encodings so we can cluster on
# them
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
data = np.array(data)
encodings = [d["encoding"] for d in data]
# cluster the embeddings
print("[INFO] clustering...")
clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
clt.fit(encodings)
# determine the total number of unique faces found in the dataset
labelIDs = np.unique(clt.labels_)
numUniqueFaces = len(np.where(labelIDs > -1)[0])
print("[INFO] # unique faces: {}".format(numUniqueFaces))
# loop over the unique face integers
for labelID in labelIDs:
# find all indexes into the `data` array that belong to the
# current label ID, then randomly sample a maximum of 25 indexes
# from the set
print("[INFO] faces for face ID: {}".format(labelID))
idxs = np.where(clt.labels_ == labelID)[0]
idxs = np.random.choice(idxs, size=min(25, len(idxs)),
replace=False)
# initialize the list of faces to include in the montage
faces = []
# loop over the sampled indexes
for i in idxs:
# load the input image and extract the face ROI
image = cv2.imread(data[i]["imagePath"])
(top, right, bottom, left) = data[i]["loc"]
face = image[top:bottom, left:right]
# force resize the face ROI to 96x96 and then add it to the
# faces montage list
face = cv2.resize(face, (96, 96))
faces.append(face)
# create a montage using 96x96 "tiles" with 5 rows and 5 columns
montage = build_montages(faces, (96, 96), (5, 5))[0]
# show the output montage
title = "Face ID #{}".format(labelID)
title = "Unknown Faces" if labelID == -1 else title
cv2.imshow(title, montage)
cv2.waitKey(0) |
py | 1a3501e748a2070e1dbf18e9e16d0d0cbb886ab5 | # -*- coding: utf-8 -*-
'''
Module used to access the esxdatacenter proxy connection methods
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
import salt.utils.platform
log = logging.getLogger(__name__)
__proxyenabled__ = ['esxdatacenter']
# Define the module's virtual name
__virtualname__ = 'esxdatacenter'
def __virtual__():
'''
Only work on proxy
'''
if salt.utils.platform.is_proxy():
return __virtualname__
return (False, 'Must be run on a proxy minion')
def get_details():
return __proxy__['esxdatacenter.get_details']()
|
py | 1a3502d3acd6a86f67c381785ce944727cfbb8ea | from django import forms
from .models import Questionnaire
class NewLandingForm(forms.Form):
label = forms.CharField(max_length=64, required=True)
questionnaire = forms.ModelChoiceField(
Questionnaire.objects.all(),
widget=forms.widgets.RadioSelect(),
empty_label=None,
required=True,
)
|
py | 1a3502d46aac0e4b56e8ea7cc5e461b04b9f4c3b | import tigerforecast
import jax.numpy as np
from tigerforecast.utils.optimizers import *
from environment.RealExperiment import RealExperiment as Experiment
import jax.random as random
from tigerforecast.utils import generate_key
from optimizers.RealONS import RealONS
from optimizers.RealOGD import RealOGD
from optimizers.Ftrl4 import Ftrl4
from optimizers.ADAFtrl import ADAFtrl
from losses.AE import ae
from losses.RealSE import se
from predictors.ArimaAutoregressor import ArimaAutoRegressor
from tigerforecast.problems.registration import problem_registry, problem_register, problem
from tigerforecast.problems.custom import register_custom_problem, CustomProblem
from tigerforecast.methods.registration import method_registry, method_register, method
from tigerforecast.methods.custom import CustomMethod, register_custom_method
import datetime
#joblib for parallelizing the runs
from joblib import Parallel, delayed
import multiprocessing
from pathlib import Path
import numpy as np
#########################################################################################################################################################
# #
# SE Settings #
# #
#########################################################################################################################################################
def settingSE1(p):
n = 20
T = 10000
dim = 10
print("Setting 1 SE started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting1-v0', {}, name = 'I')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 1 SE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=se),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=se),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=se),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=se),'n': dim}, name = 'FTRL-4_p_' + str(p) + "_d_" + str(c))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': se, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': se}, name = 'ARIMA-AO-Hedge')
print("Setting 1 SE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10)
exp.graph(save_as=store_directory + "papersetting1SE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, save_csv_path = store_directory + 'papersetting1SE')
def settingSE2(p):
n = 20
T = 10000
dim = 10
print("Setting 2 SE started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting2-v0', {}, name = 'II')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 2 SE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=se),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=se),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=se),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=se),'n': dim}, name = 'FTRL-4_p_' + str(p) + "_d_" + str(c))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': se, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': se}, name = 'ARIMA-AO-Hedge')
print("Setting 2 SE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10)
exp.graph(save_as=store_directory + "papersetting2SE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, save_csv_path = store_directory + 'papersetting2SE')
def settingSE3(p):
n = 20
T = 10000
dim = 10
print("Setting 3 SE started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting3-v0', {}, name = 'III')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 3 SE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=se),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=se),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=se),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=se),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': se, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': se}, name = 'ARIMA-AO-Hedge')
print("Setting 3 SE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10)
exp.graph(save_as=store_directory + "papersetting3SE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, save_csv_path = store_directory + 'papersetting3SE')
def settingSE4(p):
d = 1
n = 1
T = 5320
dim = 8
print("Setting 4 SE started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(timesteps = T, n_runs=n)
exp.add_problem('ExperimentSetting4-v0', {}, name = 'IV')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 4 SE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=se),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=se),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=se),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=se),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': se, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': se}, name = 'ARIMA-AO-Hedge')
print("Setting 4 SE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10)
exp.graph(save_as=store_directory + "papersetting4SE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, save_csv_path = store_directory + 'papersetting4SE')
def settingSE5(p):
d = 1
n = 1
T = 590
dim = 10
print("Setting 5 SE started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting5-v0', {}, name = 'V')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 5 SE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=se),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=se),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=se),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=se),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': se, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': se}, name = 'ARIMA-AO-Hedge')
print("Setting 5 SE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10)
exp.graph(save_as=store_directory + "papersetting5SE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, save_csv_path = store_directory + 'papersetting5SE')
def settingSE6(p):
dim = 4
n = 1
T = 419
d = 1  # differencing order used by the ADAFtrl/FTRL-4 methods below
print("Setting 6 SE started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting6-v0', {}, name = 'VI')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 6 SE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=se),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=se),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': d, 'optimizer': ADAFtrl(loss=se),'n': dim}, name = 'ADAFtrl_p_' + str(p) + "_d_" + str(d))
exp.add_method('ArimaAR', {'p' : p, 'd': d, 'optimizer': Ftrl4(loss=se),'n': dim}, name = 'FTRL-4_p_' + str(p) + "_d_" + str(d))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': se, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': se}, name = 'ARIMA-AO-Hedge')
print("Setting 6 SE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10)
exp.graph(save_as=store_directory + "papersetting6SE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, save_csv_path = store_directory + 'papersetting6SE')
#########################################################################################################################################################
# #
# AE Settings #
# #
#########################################################################################################################################################
def settingAE1(p):
n = 20
T = 10000
dim = 10
exp = Experiment()
exp.initialize(metrics = ['ae'], timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting1-v0', {}, name = 'I')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 1 AE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=ae),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=ae),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=ae),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': c, 'optimizer': ADAFtrl(loss=ae),'n': dim}, name = 'ADAFtrl_p_' + str(p) + '_d_' + str(c))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': ae, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': ae}, name = 'ARIMA-AO-Hedge')
print("Setting 1 AE finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10, metric = 'ae')
exp.graph(save_as=store_directory + "papersetting1_AE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, metric = 'ae', save_csv_path = store_directory + 'papersetting1_AE')
def settingAE2(p):
n = 20
T = 10000
dim = 10
print("Setting ae 2 started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(metrics = ['ae'], timesteps = T, n_runs=n)
exp.add_problem('ExperimentSetting2-v0', name = 'II')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 2 AE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=ae),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=ae),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=ae),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=ae),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': ae, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': ae}, name = 'ARIMA-AO-Hedge')
print("Setting AE 2 finished at " + str(datetime.datetime.now()))
exp.scoreboard(n_digits = 10, metric = 'ae')
exp.graph(save_as=store_directory + "papersetting2_AE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, metric = 'ae', save_csv_path = store_directory + 'papersetting2_AE')
def settingAE3(p):
n = 20
T = 10000
dim = 10
print("Setting ae 3 started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(metrics = ['ae'], timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting3-v0', name = 'III')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 3 AE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=ae),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=ae),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=ae),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=ae),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': ae, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': ae}, name = 'ARIMA-AO-Hedge')
print("Setting AE 3 finished at " + str(datetime.datetime.now()))
exp.scoreboard(n_digits = 10, metric = 'ae')
exp.graph(save_as=store_directory + "papersetting3_AE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, metric = 'ae', save_csv_path = store_directory + 'papersetting3_AE')
def settingAE4(p):
d = 1
n = 1
T = 5320
dim = 8
print("Setting ae 4 started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(metrics = ['ae'], timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting4-v0', name = 'IV')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 4 AE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=ae),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=ae),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=ae),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=ae),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': ae, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': ae}, name = 'ARIMA-AO-Hedge')
print("Setting AE 4 finished at " + str(datetime.datetime.now()))
exp.scoreboard(n_digits = 10, metric = 'ae')
exp.graph(save_as=store_directory + "papersetting4_AE.pdf", avg_regret = True, size=15, start_time = 100, metric = 'ae', dpi = 100, save_csv_path = store_directory + 'papersetting4_AE')
def settingAE5(p):
d = 1
n = 1
T = 590
dim = 10
print("Setting ae 5 started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(metrics = ['ae'], timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting5-v0', name = 'V')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 5 AE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=ae),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=ae),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': ADAFtrl(loss=ae),'n': dim}, name = 'ADAFtrl_p_' + str(p))
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': Ftrl4(loss=ae),'n': dim}, name = 'FTRL-4_p_' + str(p))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': ae, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': ae}, name = 'ARIMA-AO-Hedge')
print("Setting AE 5 finished at " + str(datetime.datetime.now()), flush = True)
exp.scoreboard(n_digits = 10, metric = 'ae')
exp.graph(save_as=store_directory + "papersetting5_AE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, metric = 'ae', save_csv_path = store_directory + 'papersetting5_AE')
def settingAE6(p):
dim = 4
n = 1
T = 419
d = 1  # differencing order used by the ADAFtrl/FTRL-4 methods below
print("Setting ae 6 started at " + str(datetime.datetime.now()), flush=True)
exp = Experiment()
exp.initialize(metrics = ['ae'], timesteps = T, n_runs=n)
#problem
exp.add_problem('ExperimentSetting6-v0', name = 'VI')
for eta in [1, 0.1, 0.01, 0.001]:
for c in [1, 2, 4]:
print("Setting 6 AE c = " + str(c) + " started at " + str(datetime.datetime.now()), flush = True)
hyp = {'lr': eta, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealOGD(hyperparameters=hyp, loss=ae),'n': dim}, name = 'OGD_p_' + str(p) + "_lr_" + str(eta) + '_c_' + str(c))
for eps in [1, 0.1, 0.01, 0.001]:
hyp = {'eta': eta, 'eps': eps, 'c':c}
exp.add_method('ArimaAR', {'p' : p, 'd': 1, 'optimizer': RealONS(hyperparameters=hyp, loss=ae),'n': dim}, name = 'ONS_p_' + str(p) + "_eta_" + str(eta) + "_eps_" + str(eps) + '_c_' + str(c))
exp.add_method('ArimaAR', {'p' : p, 'd': d, 'optimizer': ADAFtrl(loss=ae),'n': dim}, name = 'ADAFtrl_p_' + str(p) + "_d_" + str(d))
exp.add_method('ArimaAR', {'p' : p, 'd': d, 'optimizer': Ftrl4(loss=ae),'n': dim}, name = 'FTRL-4_p_' + str(p) + "_d_" + str(d))
exp.add_method('JamilAlgo1', {'n': dim, 'loss': ae, 'eta': np.sqrt(T * np.log(96))}, name = 'jamil_aggregation')
exp.add_method('ARIMAAOHedge', {'n': dim, 'loss': ae}, name = 'ARIMA-AO-Hedge')
print("Setting ae 6 finished at " + str(datetime.datetime.now()))
exp.scoreboard(n_digits = 10, metric = 'ae')
exp.graph(save_as=store_directory + "papersetting6_AE.pdf", avg_regret = True, size=15, start_time = 100, dpi = 100, metric = 'ae', save_csv_path = store_directory + 'papersetting6_AE')
def run_experiment(i, p):
method_register(
id='ArimaAR',
entry_point='predictors.ArimaAutoregressor:ArimaAutoRegressor',
)
method_register(
id='JamilAlgo1',
entry_point='predictors.Jamil_algo1:JamilAlgo1',
)
method_register(
id='ARIMAAOHedge',
entry_point='predictors.ARIMA_AO_Hedge:ARIMAAOHedge',
)
problem_register(
id='ExperimentSetting1-v0',
entry_point='problems.RevisionExperimentSetting1:RevisionExperimentSetting1',
)
problem_register(
id='ExperimentSetting2-v0',
entry_point='problems.RevisionExperimentSetting2:RevisionExperimentSetting2',
)
problem_register(
id='ExperimentSetting3-v0',
entry_point='problems.RevisionExperimentSetting3:RevisionExperimentSetting3',
)
problem_register(
id='ExperimentSetting4-v0',
entry_point='problems.RevisionExperimentSettingReal1:RevisionExperimentSettingReal1',
)
problem_register(
id='ExperimentSetting5-v0',
entry_point='problems.RevisionExperimentSettingReal2:RevisionExperimentSettingReal2',
)
problem_register(
id='ExperimentSetting6-v0',
entry_point='problems.RevisionExperimentSettingReal3:RevisionExperimentSettingReal3',
)
if(i == 1):
settingSE1(p)
elif(i == 2):
settingSE2(p)
elif(i == 3):
settingSE3(p)
elif(i == 4):
settingSE4(p)
elif(i == 5):
settingSE5(p)
elif(i == 6):
settingSE6(p)
elif(i == 11):
settingAE1(p)
elif(i == 12):
settingAE2(p)
elif(i == 13):
settingAE3(p)
elif(i == 14):
settingAE4(p)
elif(i == 15):
settingAE5(p)
elif(i == 16):
settingAE6(p)
store_directory = "experiments_results/"
if __name__ == '__main__':
Path(store_directory).mkdir(parents=True, exist_ok=True)
tasklist = [(1, 8), (1, 16), (1, 32), (1, 64),
(2, 8), (2, 16), (2, 32), (2, 64),
(3, 8), (3, 16), (3, 32), (3, 64),
(4, 8), (4, 16), (4, 32), (4, 64),
(5, 8), (5, 16), (5, 32), (5, 64),
(6, 8), (6, 16), (6, 32), (6, 64),
(11, 8), (11, 16), (11, 32), (11, 64),
(12, 8), (12, 16), (12, 32), (12, 64),
(13, 8), (13, 16), (13, 32), (13, 64),
(14, 8), (14, 16), (14, 32), (14, 64),
(15, 8), (15, 16), (15, 32), (15, 64),
(16, 8), (16, 16), (16, 32), (16, 64)
]
results = Parallel(n_jobs=len(tasklist))(delayed(run_experiment)(i, p) for (i, p) in tasklist) |
py | 1a3503fc665c7cd55d7651e161821c19c25925cb | """
Copyright (c) 2019-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from openvino.runtime import Core, get_version, PartialShape
class InferenceEngine:
def __init__(self, net_model_xml_path, device, stride):
self.device = device
self.stride = stride
log.info('OpenVINO Inference Engine')
log.info('\tbuild: {}'.format(get_version()))
self.core = Core()
log.info('Reading model {}'.format(net_model_xml_path))
self.model = self.core.read_model(net_model_xml_path)
required_output_keys = {'features', 'heatmaps', 'pafs'}
for output_tensor_name in required_output_keys:
try:
self.model.output(output_tensor_name)
except RuntimeError:
raise RuntimeError("The demo supports only topologies with the following output keys: {}".format(
', '.join(required_output_keys)))
self.input_tensor_name = self.model.inputs[0].get_any_name()
compiled_model = self.core.compile_model(self.model, self.device)
self.infer_request = compiled_model.create_infer_request()
log.info('The model {} is loaded to {}'.format(net_model_xml_path, self.device))
def infer(self, img):
img = img[0:img.shape[0] - (img.shape[0] % self.stride),
0:img.shape[1] - (img.shape[1] % self.stride)]
n, c, h, w = self.model.inputs[0].shape
if h != img.shape[0] or w != img.shape[1]:
self.model.reshape({self.input_tensor_name: PartialShape([n, c, img.shape[0], img.shape[1]])})
compiled_model = self.core.compile_model(self.model, self.device)
self.infer_request = compiled_model.create_infer_request()
img = np.transpose(img, (2, 0, 1))[None, ]
self.infer_request.infer({self.input_tensor_name: img})
inference_result = {name: self.infer_request.get_tensor(name).data[:] for name in {'features', 'heatmaps', 'pafs'}}
inference_result = (inference_result['features'][0],
inference_result['heatmaps'][0], inference_result['pafs'][0])
return inference_result
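# Usage sketch (hypothetical path and device; the model must expose the
# 'features', 'heatmaps' and 'pafs' outputs required above):
#
#   engine = InferenceEngine('model.xml', 'CPU', stride=8)
#   features, heatmaps, pafs = engine.infer(bgr_frame)  # HxWx3 numpy image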
|
py | 1a3504d2cd84aceb8790db9d2f47ea595908c9af | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetApplicationGatewayResult',
'AwaitableGetApplicationGatewayResult',
'get_application_gateway',
]
@pulumi.output_type
class GetApplicationGatewayResult:
"""
Application gateway resource.
"""
def __init__(__self__, authentication_certificates=None, autoscale_configuration=None, backend_address_pools=None, backend_http_settings_collection=None, custom_error_configurations=None, enable_fips=None, enable_http2=None, etag=None, firewall_policy=None, frontend_ip_configurations=None, frontend_ports=None, gateway_ip_configurations=None, http_listeners=None, id=None, identity=None, location=None, name=None, operational_state=None, probes=None, provisioning_state=None, redirect_configurations=None, request_routing_rules=None, resource_guid=None, rewrite_rule_sets=None, sku=None, ssl_certificates=None, ssl_policy=None, tags=None, trusted_root_certificates=None, type=None, url_path_maps=None, web_application_firewall_configuration=None, zones=None):
if authentication_certificates and not isinstance(authentication_certificates, list):
raise TypeError("Expected argument 'authentication_certificates' to be a list")
pulumi.set(__self__, "authentication_certificates", authentication_certificates)
if autoscale_configuration and not isinstance(autoscale_configuration, dict):
raise TypeError("Expected argument 'autoscale_configuration' to be a dict")
pulumi.set(__self__, "autoscale_configuration", autoscale_configuration)
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if backend_http_settings_collection and not isinstance(backend_http_settings_collection, list):
raise TypeError("Expected argument 'backend_http_settings_collection' to be a list")
pulumi.set(__self__, "backend_http_settings_collection", backend_http_settings_collection)
if custom_error_configurations and not isinstance(custom_error_configurations, list):
raise TypeError("Expected argument 'custom_error_configurations' to be a list")
pulumi.set(__self__, "custom_error_configurations", custom_error_configurations)
if enable_fips and not isinstance(enable_fips, bool):
raise TypeError("Expected argument 'enable_fips' to be a bool")
pulumi.set(__self__, "enable_fips", enable_fips)
if enable_http2 and not isinstance(enable_http2, bool):
raise TypeError("Expected argument 'enable_http2' to be a bool")
pulumi.set(__self__, "enable_http2", enable_http2)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if firewall_policy and not isinstance(firewall_policy, dict):
raise TypeError("Expected argument 'firewall_policy' to be a dict")
pulumi.set(__self__, "firewall_policy", firewall_policy)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if frontend_ports and not isinstance(frontend_ports, list):
raise TypeError("Expected argument 'frontend_ports' to be a list")
pulumi.set(__self__, "frontend_ports", frontend_ports)
if gateway_ip_configurations and not isinstance(gateway_ip_configurations, list):
raise TypeError("Expected argument 'gateway_ip_configurations' to be a list")
pulumi.set(__self__, "gateway_ip_configurations", gateway_ip_configurations)
if http_listeners and not isinstance(http_listeners, list):
raise TypeError("Expected argument 'http_listeners' to be a list")
pulumi.set(__self__, "http_listeners", http_listeners)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if operational_state and not isinstance(operational_state, str):
raise TypeError("Expected argument 'operational_state' to be a str")
pulumi.set(__self__, "operational_state", operational_state)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if redirect_configurations and not isinstance(redirect_configurations, list):
raise TypeError("Expected argument 'redirect_configurations' to be a list")
pulumi.set(__self__, "redirect_configurations", redirect_configurations)
if request_routing_rules and not isinstance(request_routing_rules, list):
raise TypeError("Expected argument 'request_routing_rules' to be a list")
pulumi.set(__self__, "request_routing_rules", request_routing_rules)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if rewrite_rule_sets and not isinstance(rewrite_rule_sets, list):
raise TypeError("Expected argument 'rewrite_rule_sets' to be a list")
pulumi.set(__self__, "rewrite_rule_sets", rewrite_rule_sets)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if ssl_certificates and not isinstance(ssl_certificates, list):
raise TypeError("Expected argument 'ssl_certificates' to be a list")
pulumi.set(__self__, "ssl_certificates", ssl_certificates)
if ssl_policy and not isinstance(ssl_policy, dict):
raise TypeError("Expected argument 'ssl_policy' to be a dict")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trusted_root_certificates and not isinstance(trusted_root_certificates, list):
raise TypeError("Expected argument 'trusted_root_certificates' to be a list")
pulumi.set(__self__, "trusted_root_certificates", trusted_root_certificates)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if url_path_maps and not isinstance(url_path_maps, list):
raise TypeError("Expected argument 'url_path_maps' to be a list")
pulumi.set(__self__, "url_path_maps", url_path_maps)
if web_application_firewall_configuration and not isinstance(web_application_firewall_configuration, dict):
raise TypeError("Expected argument 'web_application_firewall_configuration' to be a dict")
pulumi.set(__self__, "web_application_firewall_configuration", web_application_firewall_configuration)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="authenticationCertificates")
def authentication_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]:
"""
Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "authentication_certificates")
@property
@pulumi.getter(name="autoscaleConfiguration")
def autoscale_configuration(self) -> Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse']:
"""
Autoscale Configuration.
"""
return pulumi.get(self, "autoscale_configuration")
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
"""
Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter(name="backendHttpSettingsCollection")
def backend_http_settings_collection(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]:
"""
Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_http_settings_collection")
@property
@pulumi.getter(name="customErrorConfigurations")
def custom_error_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]:
"""
Custom error configurations of the application gateway resource.
"""
return pulumi.get(self, "custom_error_configurations")
@property
@pulumi.getter(name="enableFips")
def enable_fips(self) -> Optional[bool]:
"""
Whether FIPS is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_fips")
@property
@pulumi.getter(name="enableHttp2")
def enable_http2(self) -> Optional[bool]:
"""
Whether HTTP2 is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_http2")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="firewallPolicy")
def firewall_policy(self) -> Optional['outputs.SubResourceResponse']:
"""
Reference of the FirewallPolicy resource.
"""
return pulumi.get(self, "firewall_policy")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]:
"""
Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="frontendPorts")
def frontend_ports(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]:
"""
Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ports")
@property
@pulumi.getter(name="gatewayIPConfigurations")
def gateway_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]:
"""
Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "gateway_ip_configurations")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]:
"""
Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
The identity of the application gateway, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operationalState")
def operational_state(self) -> str:
"""
Operational state of the application gateway resource.
"""
return pulumi.get(self, "operational_state")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]:
"""
Probes of the application gateway resource.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the application gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="redirectConfigurations")
def redirect_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']]:
"""
Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "redirect_configurations")
@property
@pulumi.getter(name="requestRoutingRules")
def request_routing_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]:
"""
Request routing rules of the application gateway resource.
"""
return pulumi.get(self, "request_routing_rules")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the application gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="rewriteRuleSets")
def rewrite_rule_sets(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']]:
"""
Rewrite rules for the application gateway resource.
"""
return pulumi.get(self, "rewrite_rule_sets")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ApplicationGatewaySkuResponse']:
"""
SKU of the application gateway resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sslCertificates")
def ssl_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]:
"""
SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "ssl_certificates")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional['outputs.ApplicationGatewaySslPolicyResponse']:
"""
SSL policy of the application gateway resource.
"""
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trustedRootCertificates")
def trusted_root_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']]:
"""
Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "trusted_root_certificates")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlPathMaps")
def url_path_maps(self) -> Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]:
"""
URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "url_path_maps")
@property
@pulumi.getter(name="webApplicationFirewallConfiguration")
def web_application_firewall_configuration(self) -> Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']:
"""
Web application firewall configuration.
"""
return pulumi.get(self, "web_application_firewall_configuration")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
A list of availability zones denoting where the resource needs to come from.
"""
return pulumi.get(self, "zones")
class AwaitableGetApplicationGatewayResult(GetApplicationGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationGatewayResult(
authentication_certificates=self.authentication_certificates,
autoscale_configuration=self.autoscale_configuration,
backend_address_pools=self.backend_address_pools,
backend_http_settings_collection=self.backend_http_settings_collection,
custom_error_configurations=self.custom_error_configurations,
enable_fips=self.enable_fips,
enable_http2=self.enable_http2,
etag=self.etag,
firewall_policy=self.firewall_policy,
frontend_ip_configurations=self.frontend_ip_configurations,
frontend_ports=self.frontend_ports,
gateway_ip_configurations=self.gateway_ip_configurations,
http_listeners=self.http_listeners,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
operational_state=self.operational_state,
probes=self.probes,
provisioning_state=self.provisioning_state,
redirect_configurations=self.redirect_configurations,
request_routing_rules=self.request_routing_rules,
resource_guid=self.resource_guid,
rewrite_rule_sets=self.rewrite_rule_sets,
sku=self.sku,
ssl_certificates=self.ssl_certificates,
ssl_policy=self.ssl_policy,
tags=self.tags,
trusted_root_certificates=self.trusted_root_certificates,
type=self.type,
url_path_maps=self.url_path_maps,
web_application_firewall_configuration=self.web_application_firewall_configuration,
zones=self.zones)
def get_application_gateway(application_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationGatewayResult:
"""
Application gateway resource.
:param str application_gateway_name: The name of the application gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['applicationGatewayName'] = application_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20190901:getApplicationGateway', __args__, opts=opts, typ=GetApplicationGatewayResult).value
return AwaitableGetApplicationGatewayResult(
authentication_certificates=__ret__.authentication_certificates,
autoscale_configuration=__ret__.autoscale_configuration,
backend_address_pools=__ret__.backend_address_pools,
backend_http_settings_collection=__ret__.backend_http_settings_collection,
custom_error_configurations=__ret__.custom_error_configurations,
enable_fips=__ret__.enable_fips,
enable_http2=__ret__.enable_http2,
etag=__ret__.etag,
firewall_policy=__ret__.firewall_policy,
frontend_ip_configurations=__ret__.frontend_ip_configurations,
frontend_ports=__ret__.frontend_ports,
gateway_ip_configurations=__ret__.gateway_ip_configurations,
http_listeners=__ret__.http_listeners,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
operational_state=__ret__.operational_state,
probes=__ret__.probes,
provisioning_state=__ret__.provisioning_state,
redirect_configurations=__ret__.redirect_configurations,
request_routing_rules=__ret__.request_routing_rules,
resource_guid=__ret__.resource_guid,
rewrite_rule_sets=__ret__.rewrite_rule_sets,
sku=__ret__.sku,
ssl_certificates=__ret__.ssl_certificates,
ssl_policy=__ret__.ssl_policy,
tags=__ret__.tags,
trusted_root_certificates=__ret__.trusted_root_certificates,
type=__ret__.type,
url_path_maps=__ret__.url_path_maps,
web_application_firewall_configuration=__ret__.web_application_firewall_configuration,
zones=__ret__.zones)
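# Illustrative invocation sketch (the resource names below are assumptions):
#
#   gw = get_application_gateway(application_gateway_name='my-appgw',
#                                resource_group_name='my-rg')
#   pulumi.export('appGwOperationalState', gw.operational_state)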
|
py | 1a3504d8e59770dfa3a8987f90b2668a2300dbf4 | import pytest
from page_objects.home_page import HomePage
from page_objects.item_details import ItemDetails
class TestProductDetails:
@pytest.mark.skip("Skip for now")
def test_product_details(self, driver):
s = ItemDetails(driver)
h = HomePage(driver)
h.navigate_to_homepage()
s.item_search()
s.product_details()
s.product_review_form()
s.add_item_to_cart()
|
bzl | 1a35054063343bfd201dfe31779c7537ad58cd21 | LITHO_ROOT = "//"
LITHO_VISIBILITY = [
"PUBLIC",
]
LITHO_STUBS_VISIBILITY = [
"//litho-core/...",
]
LITHO_TESTING_UTIL_VISIBILITY = [
"PUBLIC",
]
LITHO_IS_OSS_BUILD = True
def make_dep_path(pth):
return LITHO_ROOT + pth
LITHO_ROOT_TARGET = make_dep_path(":components")
# Java source
LITHO_JAVA_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho:litho")
LITHO_ANNOTATIONS_TARGET = make_dep_path("litho-annotations/src/main/java/com/facebook/litho/annotations:annotations")
LITHO_CONFIG_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/config:config")
LITHO_BOOST_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/boost:boost")
LITHO_DISPLAYLISTSTUBS_TARGET = make_dep_path("litho-stubs:stubs")
LITHO_VIEWCOMPAT_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/viewcompat:viewcompat")
LITHO_UTILS_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/utils:utils")
LITHO_WIDGET_ACCESSIBILITIES_TARGET = make_dep_path("litho-widget/src/main/java/com/facebook/litho/widget/accessibility:accessibility")
LITHO_WIDGET_TARGET = make_dep_path("litho-widget/src/main/java/com/facebook/litho/widget:widget")
LITHO_LITHO_FRESCO_TARGET = make_dep_path("litho-fresco/src/main/java/com/facebook/litho/fresco:fresco")
LITHO_STATS_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/stats:stats")
LITHO_TESTING_CORE_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho:litho")
LITHO_TESTING_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing:testing")
LITHO_TESTING_ASSERTJ_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing/assertj:assertj")
LITHO_TESTING_HELPER_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing/helper:helper")
LITHO_TESTING_SUBCOMPONENTS_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing/subcomponents:subcomponents")
LITHO_TESTING_WIDGET_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/widget:widget")
LITHO_TESTING_ESPRESSO_TARGET = make_dep_path("litho-espresso/src/main/java/com/facebook/litho/testing/espresso:espresso")
LITHO_TEST_RES = make_dep_path("litho-it/src/main:res")
LITHO_SECTIONS_TARGET = make_dep_path("litho-sections-core/src/main/java/com/facebook/litho/sections:sections")
LITHO_SECTIONS_COMMON_TARGET = make_dep_path("litho-sections-core/src/main/java/com/facebook/litho/sections/common:common")
LITHO_SECTIONS_WIDGET_TARGET = make_dep_path("litho-sections-widget/src/main/java/com/facebook/litho/sections/widget:widget")
LITHO_SECTIONS_ANNOTATIONS_TARGET = make_dep_path("litho-sections-annotations/src/main/java/com/facebook/litho/sections/annotations:annotations")
LITHO_SECTIONS_PROCESSOR_TARGET = make_dep_path("litho-sections-processor/src/main/java/com/facebook/litho/sections/specmodels/processor:processor")
LITHO_SECTIONS_CONFIG_TARGET = make_dep_path("litho-sections-core/src/main/java/com/facebook/litho/sections/config:config")
LITHO_FBJNI_JAVA_TARGET = make_dep_path("lib/fbjni/src/main/java/com/facebook/jni:jni")
# Test source
LITHO_TEST_TARGET = make_dep_path("litho-it/src/test/java/com/facebook/litho:litho")
# Java source with local upstream
LITHO_PROGUARDANNOTATIONS_TARGET = make_dep_path("litho-annotations/src/main/java/com/facebook/proguard/annotations:annotations")
# Resources
LITHO_RES_TARGET = make_dep_path("litho-core:res")
# Libraries
LITHO_INFERANNOTATIONS_TARGET = make_dep_path("lib/infer-annotations:infer-annotations")
LITHO_JSR_TARGET = make_dep_path("lib/jsr-305:jsr-305")
LITHO_ANDROIDSUPPORT_TARGET = make_dep_path("lib/android-support:android-support")
LITHO_ANDROIDSUPPORT_RECYCLERVIEW_TARGET = make_dep_path("lib/android-support:android-support-recyclerview")
LITHO_ANDROIDSUPPORT_APPCOMPAT_TARGET = make_dep_path("lib/appcompat:appcompat")
LITHO_ANDROIDSUPPORT_TESTING_TARGET = make_dep_path("lib/android-support:android-support-testing")
LITHO_YOGA_TARGET = make_dep_path("lib/yoga:yoga")
LITHO_YOGAJNI_TARGET = make_dep_path("lib/yogajni:jni")
LITHO_BUILD_CONFIG_TARGET = make_dep_path(":build_config")
LITHO_COMMONS_CLI_TARGET = make_dep_path("lib/commons-cli:commons-cli")
LITHO_TEXTLAYOUTBUILDER_TARGET = make_dep_path("lib/textlayoutbuilder:textlayoutbuilder")
LITHO_JAVAPOET_TARGET = make_dep_path("lib/javapoet:javapoet")
LITHO_FBCORE_TARGET = make_dep_path("lib/fbcore:fbcore")
LITHO_SOLOADER_TARGET = make_dep_path("lib/soloader:soloader")
LITHO_ASSERTJ_TARGET = make_dep_path("lib/assertj:assertj")
LITHO_COMPILE_TESTING_TARGET = make_dep_path("lib/compile-testing:compile-testing")
LITHO_TRUTH_TARGET = make_dep_path("lib/truth:truth")
LITHO_MOCKITO_TARGET = make_dep_path("lib/mockito:mockito")
LITHO_POWERMOCK_REFLECT_TARGET = make_dep_path("lib/powermock:powermock-reflect")
LITHO_POWERMOCK_MOCKITO_TARGET = make_dep_path("lib/powermock:powermock-mockito")
LITHO_JNI_TARGET = make_dep_path("lib/jni-hack:jni-hack")
LITHO_FBJNI_TARGET = make_dep_path("lib/fbjni:jni")
LITHO_GUAVA_TARGET = make_dep_path("lib/guava:guava")
LITHO_DIFFUTILS_TARGET = make_dep_path("lib/diff-utils:diff-utils")
LITHO_ESPRESSO_TARGET = make_dep_path("lib/espresso:espresso")
LITHO_SCREENSHOT_TARGET = make_dep_path("lib/screenshot:screenshot")
LITHO_JAVAC_TOOLS_TARGET = make_dep_path("lib/javac-tools:javac-tools")
# Fresco
LITHO_FRESCO_TARGET = make_dep_path("lib/fresco:fresco")
LITHO_ROBOLECTRIC_TARGET = make_dep_path("lib/robolectric3:robolectric3")
LITHO_JUNIT_TARGET = make_dep_path("lib/junit:junit")
LITHO_HAMCREST_LIBRARY_TARGET = make_dep_path("lib/hamcrest:hamcrest")
LITHO_HAMCREST_CORE_TARGET = make_dep_path("lib/hamcrest:hamcrest")
# Annotation processors
LITHO_PROCESSOR_TARGET = make_dep_path("litho-processor/src/main/java/com/facebook/litho/specmodels/processor:processor")
LITHO_PROCESSOR_LIB_TARGET = make_dep_path("litho-processor/src/main/java/com/facebook/litho/specmodels/processor:processor-lib")
LITHO_SECTIONS_PROCESSOR_LIB_TARGET = make_dep_path("litho-sections-processor/src/main/java/com/facebook/litho/sections/specmodels/processor:processor-lib")
# Sample app
LITHO_SAMPLE_JAVA = make_dep_path("sample/src/main/java/com/facebook/samples/litho:litho")
LITHO_SAMPLE_BAREBONES_JAVA = make_dep_path("sample-barebones/src/main/java/com/facebook/samples/lithobarebones:lithobarebones")
LITHO_SAMPLE_BAREBONES_RES = make_dep_path("sample-barebones:res")
LITHO_SAMPLE_CODELAB_JAVA = make_dep_path("sample-codelab/src/main/java/com/facebook/samples/lithocodelab:lithocodelab")
LITHO_SAMPLE_CODELAB_RES = make_dep_path("sample-codelab:res")
LITHO_SAMPLE_RES = make_dep_path("sample:res")
# Other targets
LITHO_OSS_TARGET = make_dep_path(":components")
# Targets that sometimes exist and sometimes don't
LITHO_TEXTLAYOUTBUILDER_UTILS_TARGET = []
LITHO_FRESCO_TARGETS = [
make_dep_path("lib/fbcore:fbcore"),
make_dep_path("lib/fresco:fresco-drawee"),
make_dep_path("lib/fresco:fresco"),
]
LITHO_FLIPPER_TARGETS = [
make_dep_path("lib/flipper:flipper"),
]
LITHO_FRESCO_PIPELINE_TARGET = [make_dep_path("lib/fresco:imagepipeline")]
LITHO_FRESCO_CONTROLLER_TARGET = []
LITHO_FRESCO_INTERFACES_TARGET = []
def components_robolectric_test(
name,
*args,
**kwargs):
"""Tests that can successfully run from the library root folder."""
extra_vm_args = [
"-Drobolectric.dependency.dir=lib/android-all",
"-Dcom.facebook.litho.is_oss=true",
]
kwargs["vm_args"] = extra_vm_args
kwargs["use_cxx_libraries"] = True
kwargs["cxx_library_whitelist"] = [
"//lib/yogajni:jni",
"//lib/fbjni:jni",
]
native.robolectric_test(
name = name,
*args,
**kwargs
)
def fb_java_test(*args, **kwargs):
"""Uses native java_test for OSS project."""
java_test(*args, **kwargs)
def litho_android_library(name, srcs = None, *args, **kwargs):
srcs = srcs or []
# This has no meaning in OSS.
kwargs.pop("fblite", None)
native.android_library(name, srcs = srcs, *args, **kwargs)
components_robolectric_powermock_test = components_robolectric_test
def fb_xplat_cxx_library(*args, **kwargs):
"""Delegates to cxx_library for OSS project."""
native.cxx_library(*args, **kwargs)
def fb_android_resource(**kwargs):
"""Delegates to native android_resource rule."""
android_resource(**kwargs)
def fb_java_library(**kwargs):
"""Delegates to native java_library rule."""
native.java_library(**kwargs)
def fb_android_library(**kwargs):
"""Delegates to native android_library rule."""
native.android_library(**kwargs)
def fb_prebuilt_cxx_library(**kwargs):
"""Delegates to native prebuilt_cxx_library."""
native.prebuilt_cxx_library(**kwargs)
def fb_instrumentation_test(**kwargs):
"""
We don't support this in the OSS build for now.
Please use Gradle instead.
"""
_ignore = kwargs
pass
def fb_core_android_library(**kwargs):
native.android_library(**kwargs)
def define_fbjni_targets():
# This target is only used in open source
fb_prebuilt_cxx_library(
name = "ndklog",
exported_platform_linker_flags = [
(
"^android.*",
["-llog"],
),
],
header_only = True,
visibility = LITHO_VISIBILITY,
)
fb_xplat_cxx_library(
name = "jni",
srcs = native.glob(
[
"src/main/cpp/fb/**/*.cpp",
],
),
header_namespace = "",
exported_headers = subdir_glob(
[
("src/main/cpp", "fb/**/*.h"),
],
),
compiler_flags = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-frtti",
"-Wall",
"-std=c++11",
"-DDISABLE_CPUCAP",
"-DDISABLE_XPLAT",
],
exported_platform_headers = [
(
"^(?!android-arm$).*$",
subdir_glob([
("src/main/cpp", "lyra/*.h"),
]),
),
],
platform_srcs = [
(
"^(?!android-arm$).*$",
glob([
"src/main/cpp/lyra/*.cpp",
]),
),
],
soname = "libfb.$(ext)",
visibility = LITHO_VISIBILITY,
deps = [
LITHO_JNI_TARGET,
":ndklog",
],
)
# This target is only used in open source and will break the monobuild
# because we cannot define `soname` multiple times.
def define_yogajni_targets():
fb_prebuilt_cxx_library(
name = "ndklog",
exported_platform_linker_flags = [
(
"^android.*",
["-llog"],
),
],
header_only = True,
visibility = LITHO_VISIBILITY,
)
fb_xplat_cxx_library(
name = "jni",
srcs = native.glob(["src/main/cpp/jni/*.cpp"]),
header_namespace = "",
compiler_flags = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-Wall",
"-O3",
"-std=c++11",
],
soname = "libyoga.$(ext)",
visibility = LITHO_VISIBILITY,
deps = [
make_dep_path("lib/yoga/src/main/cpp:yoga"),
LITHO_FBJNI_TARGET,
":ndklog",
],
)
# This target is only used in open source and will break the monobuild
# because we cannot define `soname` multiple times.
def define_cpp_yoga_targets():
fb_prebuilt_cxx_library(
name = "ndklog",
exported_platform_linker_flags = [
(
"^android.*",
["-llog"],
),
],
header_only = True,
visibility = LITHO_VISIBILITY,
)
fb_xplat_cxx_library(
name = "yoga",
srcs = native.glob(["yoga/*.cpp"]),
header_namespace = "",
exported_headers = native.glob(["yoga/*.h"]),
compiler_flags = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-Wall",
"-std=c++11",
"-O3",
],
force_static = True,
visibility = LITHO_VISIBILITY,
deps = [
":ndklog",
],
)
|
py | 1a35054b8c2cf8d76d5cac9f0bdf2faa27ef2a4f | # Generated by Django 2.1.5 on 2019-03-23 17:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('custom_auth', '0002_init_groups'),
]
operations = [
migrations.AlterField(
model_name='student',
name='year_of_studying',
field=models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)], verbose_name='year of study'),
),
]
|
py | 1a3505b554d42e8e8b7884c5ed2b98dbec4283e4 | # Copyright (c) 2013, Epoch and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def execute(filters=None):
unique_po_items_list = []
global company
company = filters.get("company")
sreq_data=get_po_list()
print "sreq_data ::", sreq_data
columns = get_columns()
sreq_items_data = []
pos_list = ""
for name in sreq_data:
sreq_id = name['name']
pos_list_tmp = name['po_list']
		if pos_list_tmp is not None and pos_list_tmp != "" and pos_list_tmp != 'NULL':
if pos_list == "":
pos_list = pos_list_tmp
else:
pos_list = pos_list + "," + pos_list_tmp
sreq_list = get_requisitioned_items(sreq_id)
if len(sreq_list)!=0:
for sreq_items in sreq_list:
item_code = sreq_items['item_code']
qty = sreq_items['qty']
data = {
"sreq_id": sreq_id,
"item_code": item_code,
"qty": qty
}
sreq_items_data.append(data)
items_list = sreq_items_data
unique_stock_requisition_list = get_unique_items_list(items_list)
print "----------unique_stock_requisition_list-----------::", unique_stock_requisition_list
if len(pos_list)!=0:
items_list = get_po_items_data(pos_list)
unique_po_items_list = get_unique_items_list(items_list)
print "----------unique_po_items_list-----------::", unique_po_items_list
if len(unique_stock_requisition_list)!=0:
data = []
for item_code in unique_stock_requisition_list:
details = unique_stock_requisition_list[item_code]
item = details.item_code
sreq_qty = details.qty
sreq_qty=float("{0:.2f}".format(sreq_qty))
ordered_qty = check_item_code_in_po_list(unique_po_items_list,item)
ordered_qty=float("{0:.2f}".format(ordered_qty))
pending_qty = float(sreq_qty) - float(ordered_qty)
pending_qty=float("{0:.2f}".format(pending_qty))
print "sreq_qty-------",sreq_qty
data.append([
item,
sreq_qty,
ordered_qty,
pending_qty
])
return columns, data
else:
pass
def get_columns():
"""return columns"""
columns = [
_("Item")+":Link/Item:100",
_("Total Qty")+"::100",
_("Ordered Qty")+"::140",
_("Pending Qty")+"::100"
]
return columns
def get_po_list():
#po_list = frappe.db.sql("""select name,po_list,material_request_type from `tabStock Requisition` where name="SREQ-00022" """, as_dict=1)
po_list = frappe.db.sql("""select name,po_list,material_request_type from `tabStock Requisition` where docstatus=1 and material_request_type='Purchase' """, as_dict=1)
return po_list
def get_requisitioned_items(sreq_id):
sreq_list = frappe.db.sql("""select item_code,qty from `tabStock Requisition Item` where parent=%s""",(sreq_id),as_dict=1)
return sreq_list
def get_unique_items_list(items_list):
if len(items_list)!=0:
items_map = {}
for data in items_list:
item_code = data['item_code']
qty = data['qty']
key = item_code
if key in items_map:
item_entry = items_map[key]
qty_temp = item_entry["qty"]
item_entry["qty"] = (qty_temp) + (qty)
else:
items_map[key] = frappe._dict({
"item_code": item_code,
"qty": qty,
})
print "-------items_map--------::", items_map
return items_map
def get_po_items_data(pos_list):
items_details = []
splitted_pos_list = pos_list.split(",")
if len(splitted_pos_list)!=0:
for po in splitted_pos_list:
po_items = get_po_items(po)
for item in po_items:
data = get_items_list(item)
items_details.append(data)
return items_details
def get_po_items(po):
po_items = frappe.db.sql("""select item_code,qty from `tabPurchase Order Item` where parent=%s and docstatus=1""", (po), as_dict=1)
return po_items
def get_items_list(item):
item_code = item['item_code']
qty = item['qty']
data = {"item_code":item_code,
"qty":qty
}
#print "-----------------data is::", data
return data
def check_item_code_in_po_list(unique_po_items_list,item):
if item in unique_po_items_list:
item_entry = unique_po_items_list[item]
qty = item_entry['qty']
else:
qty = 0
return qty
|
py | 1a3505c8f94573b48aa65026b344acdaebe0774a | from ..typing import Number
from .base import MarkdownSection
__all__ = ["MarkdownCodeBlock"]
class MarkdownCodeBlock(MarkdownSection):
def append(self, line: str) -> None:
self.lines.append(line)
def reformatted(self, width: Number = 88) -> str:
return "\n".join([line.rstrip() for line in self.lines])
|
py | 1a3505dcaece29bef26866321c407888a3e3fa54 | # -*- coding: utf-8 -*-
"""Unit tests of everything related to retrieving the version
There are four tree states we want to check:
A: sitting on the 1.0 tag
B: dirtying the tree after 1.0
C: a commit after a tag, clean tree
D: a commit after a tag, dirty tree
"""
from __future__ import absolute_import, division, print_function
import inspect
import os
import re
import shutil
import sys
from contextlib import contextmanager
from os.path import join as path_join
from os.path import exists
from shutil import copyfile, rmtree
import pytest
from pyscaffold import shell
from pyscaffold.cli import main as putup
from pyscaffold.repo import add_tag
from pyscaffold.shell import command_exists, git
from pyscaffold.utils import chdir
pytestmark = pytest.mark.slow
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
def cmd_path(cmd):
"""Try to get a fully specified command path.
Returns the full path when possible, otherwise just the command name.
Useful when running from virtualenv context.
"""
candidates = os.getenv('PATH', '').split(os.pathsep)
candidates.insert(0, path_join(sys.prefix, 'bin'))
if hasattr(sys, 'real_prefix'):
candidates.insert(1, path_join(getattr(sys, 'real_prefix'), 'bin'))
for candidate in candidates:
full_path = path_join(candidate, cmd)
if exists(full_path):
return full_path
return cmd
def venv_cmd(cmd, *args):
"""Create a callable from a command inside a virtualenv."""
return shell.ShellCommand(' '.join([cmd_path(cmd)] + list(args)))
pip = venv_cmd("pip")
setup_py = venv_cmd("python", "setup.py")
untar = shell.ShellCommand(
("gtar" if command_exists("gtar") else "tar") + " xvzkf")
type_ = shell.ShellCommand('file')
# ^ BSD tar differs in options from GNU tar,
# so make sure to use the correct one...
# https://xkcd.com/1168/
def is_inside_venv():
return hasattr(sys, 'real_prefix')
def check_clean_venv():
installed = [line.split()[0] for line in pip('list')]
dirty = ['demoapp', 'demoapp_data', 'UNKNOWN']
app_list = [x for x in dirty if x in installed]
if not app_list:
return
else:
raise RuntimeError("Dirty virtual environment:\n{} found".format(
', '.join(app_list)))
def create_demoapp(data=False):
if data:
demoapp = 'demoapp_data'
else:
demoapp = 'demoapp'
putup([demoapp])
with chdir(demoapp):
demoapp_src_dir = os.path.join(__location__, demoapp)
demoapp_dst_root = os.getcwd()
demoapp_dst_pkg = os.path.join(demoapp_dst_root, 'src', demoapp)
copyfile(os.path.join(demoapp_src_dir, 'runner.py'),
os.path.join(demoapp_dst_pkg, 'runner.py'))
git('add', os.path.join(demoapp_dst_pkg, 'runner.py'))
copyfile(os.path.join(demoapp_src_dir, 'setup.cfg'),
os.path.join(demoapp_dst_root, 'setup.cfg'))
copyfile(os.path.join(demoapp_src_dir, 'setup.py'),
os.path.join(demoapp_dst_root, 'setup.py'))
git('add', os.path.join(demoapp_dst_root, 'setup.cfg'))
git('add', os.path.join(demoapp_dst_root, 'setup.py'))
if data:
data_src_dir = os.path.join(demoapp_src_dir, 'data')
data_dst_dir = os.path.join(demoapp_dst_pkg, 'data')
os.mkdir(data_dst_dir)
copyfile(os.path.join(data_src_dir, 'hello_world.txt'),
os.path.join(data_dst_dir, 'hello_world.txt'))
git('add', os.path.join(data_dst_dir, 'hello_world.txt'))
git('commit', '-m', 'Added basic application logic')
def build_demoapp(dist, path=None, demoapp='demoapp'):
if path is None:
path = os.getcwd()
path = os.path.join(path, demoapp)
with chdir(path):
setup_py(dist)
@contextmanager
def installed_demoapp(dist=None, path=None, demoapp='demoapp'):
check_clean_venv()
if path is None:
path = os.getcwd()
path = os.path.join(path, demoapp, "dist", "{}*".format(demoapp))
if dist == 'bdist':
with chdir('/'):
output = untar(path)
install_dirs = list()
install_bin = None
for line in output:
if re.search(r".*/site-packages/{}.*?/$".format(demoapp), line):
install_dirs.append(line)
if re.search(r".*/bin/{}$".format(demoapp), line):
install_bin = line
elif dist == 'install':
with chdir(demoapp):
setup_py('install')
else:
pip("install", path)
try:
yield venv_cmd(demoapp)
finally:
if dist == 'bdist':
with chdir('/'):
os.remove(install_bin)
for path in install_dirs:
rmtree(path, ignore_errors=True)
else:
pip("uninstall", "-y", demoapp)
def check_version(output, exp_version, dirty=False):
version = output.split(' ')[-1]
    # depending on the setuptools version, the local version part is joined with '+', sometimes with '_'
if dirty:
if '+' in version:
ver, local = version.split('+')
else:
ver, local = version.split('_')
assert local.endswith('dirty')
assert ver == exp_version
else:
if '+' in version:
ver = version.split('+')
else:
ver = version.split('_')
if len(ver) > 1:
assert not ver[1].endswith('dirty')
assert ver[0] == exp_version
def make_dirty_tree(demoapp='demoapp'):
dirty_file = os.path.join('src', demoapp, 'runner.py')
with chdir(demoapp):
with open(dirty_file, 'a') as fh:
fh.write("\n\ndirty_variable = 69\n")
def make_commit(demoapp='demoapp'):
with chdir(demoapp):
git('commit', '-a', '-m', 'message')
def rm_git_tree(demoapp='demoapp'):
git_path = os.path.join(demoapp, '.git')
shutil.rmtree(git_path)
def test_sdist_install(tmpfolder):
create_demoapp()
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
def test_sdist_install_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v0.1', 'first tag')
make_dirty_tree()
make_commit()
make_dirty_tree()
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "0.1.post0.dev1"
check_version(out, exp, dirty=True)
def test_sdist_install_with_1_0_tag(tmpfolder):
create_demoapp()
make_dirty_tree()
make_commit()
add_tag('demoapp', 'v1.0', 'final release')
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "1.0"
check_version(out, exp, dirty=False)
def test_sdist_install_with_1_0_tag_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v1.0', 'final release')
make_dirty_tree()
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "1.0"
check_version(out, exp, dirty=True)
# bdist works like sdist so we only try one combination
def test_bdist_install(tmpfolder):
create_demoapp()
build_demoapp('bdist')
with installed_demoapp('bdist') as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
# bdist wheel works like sdist so we only try one combination
@pytest.mark.skipif(not is_inside_venv(),
reason='Needs to run in a virtualenv')
def test_bdist_wheel_install(tmpfolder):
create_demoapp()
build_demoapp('bdist_wheel')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
def test_git_repo(tmpfolder):
create_demoapp()
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '0.0.post0.dev2'
check_version(out, exp, dirty=False)
def test_git_repo_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v0.1', 'first tag')
make_dirty_tree()
make_commit()
make_dirty_tree()
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '0.1.post0.dev1'
check_version(out, exp, dirty=True)
def test_git_repo_with_1_0_tag(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v1.0', 'final release')
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '1.0'
check_version(out, exp, dirty=False)
def test_git_repo_with_1_0_tag_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v1.0', 'final release')
make_dirty_tree()
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '1.0'
check_version(out, exp, dirty=True)
def test_sdist_install_with_data(tmpfolder):
create_demoapp(data=True)
build_demoapp('sdist', demoapp='demoapp_data')
with installed_demoapp(demoapp='demoapp_data') as demoapp_data:
out = next(demoapp_data())
exp = "Hello World"
assert out.startswith(exp)
def test_bdist_install_with_data(tmpfolder):
create_demoapp(data=True)
build_demoapp('bdist', demoapp='demoapp_data')
with installed_demoapp('bdist', demoapp='demoapp_data') as demoapp_data:
out = next(demoapp_data())
exp = "Hello World"
assert out.startswith(exp)
@pytest.mark.skipif(not is_inside_venv(),
reason='Needs to run in a virtualenv')
def test_bdist_wheel_install_with_data(tmpfolder):
create_demoapp(data=True)
build_demoapp('bdist_wheel', demoapp='demoapp_data')
with installed_demoapp(demoapp='demoapp_data') as demoapp_data:
out = next(demoapp_data())
exp = "Hello World"
assert out.startswith(exp)
def test_setup_py_install(tmpfolder):
create_demoapp()
with installed_demoapp('install', demoapp='demoapp') as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
|
py | 1a3505f88f86b463c19d6ff73df70e3063e4e5ba | # -*- coding: UTF-8 -*-
# Copyright 2015-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""
Another readonly user interface to :mod:`team
<lino_book.projects.team>`.
See :ref:`noi.specs.public`.
.. autosummary::
:toctree:
settings
tests
"""
|
py | 1a3506a85e9d865dfe9d55bc22528b599549f249 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
name = "google-cloud-automl"
description = "Cloud AutoML API client library"
version = "0.2.0"
release_status = "Development Status :: 3 - Alpha"
dependencies = [
"google-api-core[grpc] >= 1.6.0, < 2.0.0dev",
'enum34; python_version < "3.4"',
]
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
packages = [
package for package in setuptools.find_packages() if package.startswith("google")
]
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/GoogleCloudPlatform/google-cloud-python",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
include_package_data=True,
zip_safe=False,
)
|
py | 1a35074461491eff5edf074c5f0bccb47f8ba6de | # -*- coding: utf-8 -*-
import os
import tempfile
class SettingsClass(object):
    __def_IMG_ADDITION_PATH = []   # Extra image search paths, besides the current one and the like
    __def_MinSimilarity = 0.995    # Nearly stable at 0.995, though once a small contour node was missed; at 0.700 a match is found at every pixel (the threshold has to be raised higher).
    __def_FindFailedDir = os.path.join(tempfile.gettempdir(), 'find_failed')
    # Logger:
    __def_PatternURLTemplate = None  # Where to fetch pattern images from: a template string with %s, into which the pattern image file name is substituted. For example: http://192.168.116.1/pikuli/pattern/ok_button.png
def __init__(self):
defvals = self.__get_default_values()
for k in defvals:
setattr(self, k, defvals[k])
def __get_default_values(self):
defvals = {}
for attr in dir(self):
if '_SettingsClass__def_' in attr:
defvals[attr.split('_SettingsClass__def_')[-1]] = getattr(self, attr)
return defvals
def addImagePath(self, path):
_path = os.path.abspath(path)
if not os.path.isdir(_path):
raise Exception('pikuli.addImagePath(...): Path \'%s\' does not exist!' % str(path))
if _path not in self.IMG_ADDITION_PATH:
self.IMG_ADDITION_PATH.append(_path)
def listImagePath(self):
for path in self.IMG_ADDITION_PATH:
yield path
def setFindFailedDir(self, path):
if not os.path.exists(path):
try:
os.makedirs(path)
except Exception:
raise Exception('pikuli.setFindFailedDir(...): can not set SettingsClass.FindFailedDir to \'%s\' -- failed to create directory.' % str(path))
self.FindFailedDir = path
def getFindFailedDir(self):
return self.FindFailedDir
def setPatternURLTemplate(self, GetPattern_URLTemplate):
self.GetPattern_URLTemplate = GetPattern_URLTemplate
def getPatternURLTemplate(self):
return self.GetPattern_URLTemplate
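# Illustrative usage sketch (assumption: pikuli normally exposes one shared
# module-level instance of this class):
#
#   settings = SettingsClass()
#   settings.addImagePath('/opt/patterns')
#   settings.setFindFailedDir('/tmp/pikuli_find_failed')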
|
py | 1a350776bf0b58589ee86bd74cb81cb30d9c76ac | # coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = ['zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete',
'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'catorze',
'quinze', 'dezesseis', 'dezasseis', 'dezessete', 'dezassete', 'dezoito', 'dezenove', 'dezanove', 'vinte',
'trinta', 'quarenta', 'cinquenta', 'sessenta', 'setenta',
'oitenta', 'noventa', 'cem', 'mil', 'milhão', 'bilhão', 'bilião', 'trilhão', 'trilião',
'quatrilhão']
_ordinal_words = ['primeiro', 'segundo', 'terceiro', 'quarto', 'quinto', 'sexto',
'sétimo', 'oitavo', 'nono', 'décimo', 'vigésimo', 'trigésimo',
'quadragésimo', 'quinquagésimo', 'sexagésimo', 'septuagésimo',
'octogésimo', 'nonagésimo', 'centésimo', 'ducentésimo',
'trecentésimo', 'quadringentésimo', 'quingentésimo', 'sexcentésimo',
'septingentésimo', 'octingentésimo', 'nongentésimo', 'milésimo',
'milionésimo', 'bilionésimo']
def like_num(text):
text = text.replace(',', '').replace('.', '')
if text.isdigit():
return True
if text.count('/') == 1:
num, denom = text.split('/')
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
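# Illustrative behaviour, derived from the word lists and rules above:
#   like_num("catorze")  -> True   (cardinal word)
#   like_num("décimo")   -> True   (ordinal word)
#   like_num("3/4")      -> True   (simple fraction)
#   like_num("palavra")  -> False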
LEX_ATTRS = {
LIKE_NUM: like_num
}
|
py | 1a3508c4a2a985e9f1a0d26fb9747b6fd90ff372 | """
dummy
"""
|
py | 1a350945ebcfa16c1f46937753a6096c762192e1 |
import datetime
import time
import redis
import random
import schedule
import Adafruit_ADS1x15
from threading import Timer
from aurorapy.client import AuroraError, AuroraTCPClient, AuroraSerialClient
def now():
return datetime.datetime.now()
def dtepoch_ms(dt):
return int(time.mktime(dt.timetuple())) * 1000 + dt.microsecond / 1000
def today_elapsed_ms():
today = datetime.date.today()
today_datetime = datetime.datetime(
year=today.year,
month=today.month,
day=today.day)
return int((now() - today_datetime).total_seconds() * 1000)
def daily_ms():
return 24*60*60*1000
def is_more_than_24h_ahead(epoch_ms):
return ((dtepoch_ms(now()) - daily_ms()) > epoch_ms)
class DataMgr:
"""Data Manager"""
def __init__(self,
                 period_sample_s=1,  # sampling period in seconds for the consumption/production readings
aggregate_interval_s=3600):
self.r = redis.StrictRedis(host='localhost', port=6379, db=0)
self.lastaggregate= now()
self.production_blink_ts = now()
self.reset()
self.period_check_s = 1
self.aggregate_interval_s = aggregate_interval_s
self.period_sample_s = period_sample_s
self.adc = Adafruit_ADS1x15.ADS1115(busnum=1)
self.inverter = AuroraSerialClient(port='/dev/ttyUSB0', address=2,
baudrate=19200, data_bits=8, parity='N', stop_bits=1, timeout=0.1, tries=3)
try:
self.inverter.connect()
except AuroraError as e:
self.log(str(e))
Timer(self.period_check_s, self.check_to_aggregate_timeout, ()).start()
Timer(self.period_sample_s, self.sample_cW_pW_Vgrid, ()).start()
schedule.every().day.at("00:00").do(self.daily_aggregate)
self.truncate_older_than_24h()
def log(self, logstr):
print str(now()) + "\t| " + logstr
def sample_cW_pW_Vgrid(self):
# Note you can change the I2C address from its default (0x48), and/or the I2C
# bus by passing in these optional parameters:
#adc = Adafruit_ADS1x15.ADS1015(address=0x49, busnum=1)
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
GAIN = 1
V = max(0, self.adc.read_adc(0, gain=GAIN) * 100 / 1000000.0)
c_W = int(V / 0.12 * 580)
p_W = 0
V_grid = 0
try:
p_W = int(self.inverter.measure(3, global_measure=True))
V_grid = int(self.inverter.measure(1, global_measure=True))
except AuroraError as e:
pass
self.set(p_W, c_W, V_grid)
Timer(self.period_sample_s, self.sample_cW_pW_Vgrid, ()).start()
def production_blink(self):
        # Register this method as a GPIO rising-edge callback and production pulses
        # are tracked automatically; see the wiring sketch below and:
        # http://raspi.tv/2013/how-to-use-interrupts-with-python-on-the-raspberry-pi-and-rpi-gpio-part-3
p_W = 3600 / (now() - self.production_blink_ts).total_seconds()
self.production_blink_ts = now()
self.set(p_W=p_W)
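    # Wiring sketch (illustrative assumption, not part of the original file):
    # the meter's pulse output would be connected to a GPIO pin and this method
    # registered as the rising-edge callback, e.g. with RPi.GPIO:
    #
    #   import RPi.GPIO as GPIO
    #   GPIO.setmode(GPIO.BCM)
    #   GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # pin 17 is an assumption
    #   GPIO.add_event_detect(17, GPIO.RISING,
    #                         callback=lambda channel: mgr.production_blink(),
    #                         bouncetime=20)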
def check_to_aggregate_timeout(self):
_now = now()
if (_now - self.lastaggregate).seconds > self.aggregate_interval_s:
self.lastaggregate = _now
self.aggregate_store()
schedule.run_pending()
Timer(self.period_check_s, self.check_to_aggregate_timeout, ()).start()
def aggregate_store(self):
self.log("Data aggregation routine")
self.r.rpush('aggregate_ts_ms_since_epoch', dtepoch_ms(self.lastupdate))
self.r.rpush('p_Wh', self.p_Wh)
self.r.rpush('c_Wh', self.c_Wh)
self.r.rpush('a_Wh', self.a_Wh)
self.r.rpush('s_Wh', self.s_Wh)
self.r.rpush('b_Wh', self.b_Wh)
self.reset_aggregate()
self.truncate_older_than_24h()
def daily_aggregate(self):
self.log("Aggregating daily data")
aggregate_len = self.r.llen('aggregate_ts_ms_since_epoch')
daily_p_Wh = 0
daily_c_Wh = 0
daily_a_Wh = 0
daily_s_Wh = 0
daily_b_Wh = 0
for i in range(0, aggregate_len):
daily_p_Wh = daily_p_Wh+ float(self.r.lindex('p_Wh', i))
daily_c_Wh = daily_c_Wh+ float(self.r.lindex('c_Wh', i))
daily_a_Wh = daily_a_Wh+ float(self.r.lindex('a_Wh', i))
daily_s_Wh = daily_s_Wh+ float(self.r.lindex('s_Wh', i))
daily_b_Wh = daily_b_Wh+ float(self.r.lindex('b_Wh', i))
self.r.rpush('daily_epoch_ms', dtepoch_ms(now()))
self.r.rpush('daily_p_Wh', daily_p_Wh)
self.r.rpush('daily_c_Wh', daily_c_Wh)
self.r.rpush('daily_a_Wh', daily_a_Wh)
self.r.rpush('daily_s_Wh', daily_s_Wh)
self.r.rpush('daily_b_Wh', daily_b_Wh)
self.truncate_older_than_24h()
def truncate_older_than_24h(self):
a_ts = self.r.lindex('aggregate_ts_ms_since_epoch', 0)
if not a_ts:
return
epoch_ms = float(a_ts)
while is_more_than_24h_ahead(epoch_ms):
self.r.lpop('aggregate_ts_ms_since_epoch')
self.r.lpop('p_Wh')
self.r.lpop('c_Wh')
self.r.lpop('a_Wh')
self.r.lpop('s_Wh')
self.r.lpop('b_Wh')
oldest_aggregate_ts = self.r.lindex('aggregate_ts_ms_since_epoch', 0)
if not oldest_aggregate_ts:
break
epoch_ms = float(oldest_aggregate_ts)
def live_store(self):
self.r.rpush('ts_ms_since_epoch', dtepoch_ms(self.lastupdate))
self.r.rpush('p_W', self.now_p_W)
self.r.rpush('c_W', self.now_c_W)
self.r.rpush('V_grid', self.now_V_grid)
while self.r.llen('ts_ms_since_epoch') > 300:
self.r.lpop('ts_ms_since_epoch')
while self.r.llen('p_W') > 300:
self.r.lpop('p_W')
while self.r.llen('c_W') > 300:
self.r.lpop('c_W')
while self.r.llen('V_grid') > 300:
self.r.lpop('V_grid')
def get_latest_live_data(self):
res = {
'ts_ms_since_epoch' : map(int, self.r.lrange('ts_ms_since_epoch', 0, -1)) ,
'p_W' : map(float, self.r.lrange('p_W', 0, -1)) ,
'c_W' : map(float, self.r.lrange('c_W', 0, -1)) ,
'V_grid' : map(float, self.r.lrange('V_grid', 0, -1)) ,
}
return res
def get_last_day_aggregate_data(self):
res = {
'aggregate_ts_ms_since_epoch' : map(int, self.r.lrange('aggregate_ts_ms_since_epoch', 0, -1)),
'p_Wh' : map(float, self.r.lrange('p_Wh', 0, -1)),
'c_Wh' : map(float, self.r.lrange('c_Wh', 0, -1)),
'a_Wh' : map(float, self.r.lrange('a_Wh', 0, -1)),
's_Wh' : map(float, self.r.lrange('s_Wh', 0, -1)),
'b_Wh' : map(float, self.r.lrange('b_Wh', 0, -1))
}
return res
def get_last_365_days_aggregate(self):
res = {
'daily_epoch_ms' :map(int, self.r.lrange('daily_epoch_ms', -365, -1)),
'daily_p_Wh' :map(float, self.r.lrange('daily_p_Wh', -365, -1)),
'daily_c_Wh' :map(float, self.r.lrange('daily_c_Wh', -365, -1)),
'daily_a_Wh' :map(float, self.r.lrange('daily_a_Wh', -365, -1)),
'daily_s_Wh' :map(float, self.r.lrange('daily_s_Wh', -365, -1)),
'daily_b_Wh' :map(float, self.r.lrange('daily_b_Wh', -365, -1))
}
return res
def get_production_W(self):
return self.now_p_W
def get_consumption_W(self):
return self.now_c_W
def get_V_grid(self):
return self.now_V_grid
def get_day_production_Wh(self):
return self.p_Wh
def get_day_consumption_Wh(self):
return self.c_Wh
def get_day_auto_consumed_Wh(self):
return self.a_Wh
def get_day_bought_Wh(self):
return self.b_Wh
def set(self, p_W=-1, c_W=-1, V_grid=-1):
if p_W == -1:
p_W = self.now_p_W
if c_W == -1:
c_W = self.now_c_W
if V_grid == -1:
V_grid = self.now_V_grid
elapsed = ((now() - self.lastupdate).total_seconds())
a_W = min(p_W, c_W) #autoconsumed
s_W = (p_W - a_W) #sold
b_W = (c_W - a_W) #bought
self.p_Wh = self.p_Wh + (p_W * elapsed / 3600) #produced [Wh]
self.c_Wh = self.c_Wh + (c_W * elapsed / 3600) #consumed [Wh]
self.a_Wh = self.a_Wh + (a_W * elapsed / 3600) #auto-consumed [Wh]
self.s_Wh = self.s_Wh + (s_W * elapsed / 3600) #sold [Wh]
self.b_Wh = self.b_Wh + (b_W * elapsed / 3600) #bought [Wh]
self.now_p_W = p_W
self.now_c_W = c_W
self.now_V_grid = V_grid
self.lastupdate = now()
self.live_store()
def reset(self):
self.now_p_W = 0
self.now_c_W = 0
self.now_V_grid = 0
self.lastupdate = now()
self.reset_aggregate()
def reset_aggregate(self):
self.p_Wh = 0 #produced [Wh]
self.c_Wh = 0 #consumed [Wh]
self.a_Wh = 0 #auto-consumed [Wh]
self.s_Wh = 0 #sold [Wh]
self.b_Wh = 0 #bought [Wh]
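# Illustrative sketch (not part of the original class): the energy bookkeeping
# performed in set() above. Power readings [W] are integrated over the elapsed
# seconds into energy counters [Wh]; auto-consumed power is the overlap of
# production and consumption, and the remainder is either sold or bought.
def _example_energy_split(p_W, c_W, elapsed_s):
    a_W = min(p_W, c_W)      # auto-consumed power
    s_W = p_W - a_W          # sold power
    b_W = c_W - a_W          # bought power
    return {
        'p_Wh': p_W * elapsed_s / 3600,
        'c_Wh': c_W * elapsed_s / 3600,
        'a_Wh': a_W * elapsed_s / 3600,
        's_Wh': s_W * elapsed_s / 3600,
        'b_Wh': b_W * elapsed_s / 3600,
    }
# e.g. _example_energy_split(1500, 1000, 60) -> 25 Wh produced, ~16.7 Wh consumed,
# ~16.7 Wh auto-consumed, ~8.3 Wh sold and 0 Wh bought over that minute.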
|
py | 1a350b3de1f5298c3bc2dca8ea259a6b2c6a3d8b | #!/usr/bin/env python3
# coding=utf-8
"""
Parser that uses the ENTSOE API to return the following data types.
Consumption
Production
Exchanges
Exchange Forecast
Day-ahead Price
Generation Forecast
Consumption Forecast
"""
import itertools
import numpy as np
from bs4 import BeautifulSoup
from collections import defaultdict
import arrow
import logging, os, re
import requests
import pandas as pd
from .lib.validation import validate
from .lib.utils import sum_production_dicts, get_token
ENTSOE_ENDPOINT = 'https://transparency.entsoe.eu/api'
ENTSOE_PARAMETER_DESC = {
'B01': 'Biomass',
'B02': 'Fossil Brown coal/Lignite',
'B03': 'Fossil Coal-derived gas',
'B04': 'Fossil Gas',
'B05': 'Fossil Hard coal',
'B06': 'Fossil Oil',
'B07': 'Fossil Oil shale',
'B08': 'Fossil Peat',
'B09': 'Geothermal',
'B10': 'Hydro Pumped Storage',
'B11': 'Hydro Run-of-river and poundage',
'B12': 'Hydro Water Reservoir',
'B13': 'Marine',
'B14': 'Nuclear',
'B15': 'Other renewable',
'B16': 'Solar',
'B17': 'Waste',
'B18': 'Wind Offshore',
'B19': 'Wind Onshore',
'B20': 'Other',
}
ENTSOE_PARAMETER_BY_DESC = {v: k for k, v in ENTSOE_PARAMETER_DESC.items()}
ENTSOE_PARAMETER_GROUPS = {
'production': {
'biomass': ['B01', 'B17'],
'coal': ['B02', 'B05', 'B07', 'B08'],
'gas': ['B03', 'B04'],
'geothermal': ['B09'],
'hydro': ['B11', 'B12'],
'nuclear': ['B14'],
'oil': ['B06'],
'solar': ['B16'],
'wind': ['B18', 'B19'],
'unknown': ['B20', 'B13', 'B15']
},
'storage': {
'hydro storage': ['B10']
}
}
ENTSOE_PARAMETER_BY_GROUP = {v: k for groups in ENTSOE_PARAMETER_GROUPS.values()
                             for k, g in groups.items() for v in g}  # PSR code (e.g. 'B16') -> fuel (e.g. 'solar')
# Get all the individual storage parameters in one list
ENTSOE_STORAGE_PARAMETERS = list(itertools.chain.from_iterable(
ENTSOE_PARAMETER_GROUPS['storage'].values()))
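# Illustrative check (not part of the original parser, and assuming the flattened
# code-to-fuel mapping above): each raw PSR code resolves to the fuel name used
# throughout this module.
def _example_parameter_grouping():
    assert ENTSOE_PARAMETER_DESC['B16'] == 'Solar'
    assert ENTSOE_PARAMETER_BY_GROUP['B16'] == 'solar'
    assert ENTSOE_PARAMETER_BY_GROUP['B10'] == 'hydro storage'
    assert ENTSOE_STORAGE_PARAMETERS == ['B10']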
# Define all ENTSOE zone_key <-> domain mapping
# see https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html
ENTSOE_DOMAIN_MAPPINGS = {
'AL': '10YAL-KESH-----5',
'AT': '10YAT-APG------L',
'AX': '10Y1001A1001A46L', # for price only; Åland has SE-SE3 area price
'BA': '10YBA-JPCC-----D',
'BE': '10YBE----------2',
'BG': '10YCA-BULGARIA-R',
'BY': '10Y1001A1001A51S',
'CH': '10YCH-SWISSGRIDZ',
'CZ': '10YCZ-CEPS-----N',
'DE': '10Y1001A1001A83F',
'DE-LU': '10Y1001A1001A82H',
'DK': '10Y1001A1001A65H',
'DK-DK1': '10YDK-1--------W',
'DK-DK2': '10YDK-2--------M',
'EE': '10Y1001A1001A39I',
'ES': '10YES-REE------0',
'FI': '10YFI-1--------U',
'FR': '10YFR-RTE------C',
'GB': '10YGB----------A',
'GB-NIR': '10Y1001A1001A016',
'GR': '10YGR-HTSO-----Y',
'HR': '10YHR-HEP------M',
'HU': '10YHU-MAVIR----U',
'IE': '10YIE-1001A00010',
'IT': '10YIT-GRTN-----B',
'IT-BR': '10Y1001A1001A699',
'IT-CA': '10Y1001C--00096J',
'IT-CNO': '10Y1001A1001A70O',
'IT-CSO': '10Y1001A1001A71M',
'IT-FO': '10Y1001A1001A72K',
'IT-NO': '10Y1001A1001A73I',
'IT-PR': '10Y1001A1001A76C',
'IT-SAR': '10Y1001A1001A74G',
'IT-SIC': '10Y1001A1001A75E',
'IT-SO': '10Y1001A1001A788',
'LT': '10YLT-1001A0008Q',
'LU': '10YLU-CEGEDEL-NQ',
'LV': '10YLV-1001A00074',
# 'MD': 'MD',
'ME': '10YCS-CG-TSO---S',
'MK': '10YMK-MEPSO----8',
'MT': '10Y1001A1001A93C',
'NL': '10YNL----------L',
'NO': '10YNO-0--------C',
'NO-NO1': '10YNO-1--------2',
'NO-NO2': '10YNO-2--------T',
'NO-NO3': '10YNO-3--------J',
'NO-NO4': '10YNO-4--------9',
'NO-NO5': '10Y1001A1001A48H',
'PL': '10YPL-AREA-----S',
'PT': '10YPT-REN------W',
'RO': '10YRO-TEL------P',
'RS': '10YCS-SERBIATSOV',
'RU': '10Y1001A1001A49F',
'RU-KGD': '10Y1001A1001A50U',
'SE': '10YSE-1--------K',
'SE-SE1': '10Y1001A1001A44P',
'SE-SE2': '10Y1001A1001A45N',
'SE-SE3': '10Y1001A1001A46L',
'SE-SE4': '10Y1001A1001A47J',
'SI': '10YSI-ELES-----O',
'SK': '10YSK-SEPS-----K',
'TR': '10YTR-TEIAS----W',
'UA': '10YUA-WEPS-----0'
}
# Generation per unit can only be obtained at EIC (Control Area) level
ENTSOE_EIC_MAPPING = {
'DK-DK1': '10Y1001A1001A796',
'DK-DK2': '10Y1001A1001A796',
'FI': '10YFI-1--------U',
'PL': '10YPL-AREA-----S',
'SE': '10YSE-1--------K',
# TODO: ADD DE
}
# Some exchanges require specific domains
ENTSOE_EXCHANGE_DOMAIN_OVERRIDE = {
'AT->IT-NO': [ENTSOE_DOMAIN_MAPPINGS['AT'], ENTSOE_DOMAIN_MAPPINGS['IT']],
'BY->UA': [ENTSOE_DOMAIN_MAPPINGS['BY'], '10Y1001C--00003F'],
'DE->DK-DK1': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['DK-DK1']],
'DE->DK-DK2': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['DK-DK2']],
'DE->SE-SE4': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE4']],
'DK-DK2->SE': [ENTSOE_DOMAIN_MAPPINGS['DK-DK2'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE4']],
'DE->NO-NO2': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['NO-NO2']],
'FR-COR->IT-CNO': ['10Y1001A1001A893', ENTSOE_DOMAIN_MAPPINGS['IT-CNO']],
'GR->IT-SO': [ENTSOE_DOMAIN_MAPPINGS['GR'],
ENTSOE_DOMAIN_MAPPINGS['IT-SO']],
'IT-CSO->ME': [ENTSOE_DOMAIN_MAPPINGS['IT'],
ENTSOE_DOMAIN_MAPPINGS['ME']],
'NO-NO3->SE': [ENTSOE_DOMAIN_MAPPINGS['NO-NO3'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE2']],
'NO-NO4->SE': [ENTSOE_DOMAIN_MAPPINGS['NO-NO4'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE2']],
'NO-NO1->SE': [ENTSOE_DOMAIN_MAPPINGS['NO-NO1'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE3']],
'PL->UA': [ENTSOE_DOMAIN_MAPPINGS['PL'], '10Y1001A1001A869'],
'IT-SIC->IT-SO': [ENTSOE_DOMAIN_MAPPINGS['IT-SIC'], ENTSOE_DOMAIN_MAPPINGS['IT-CA']],
}
# Some zone_keys are part of bidding zone domains for price data
ENTSOE_PRICE_DOMAIN_OVERRIDE = {
'DK-BHM': ENTSOE_DOMAIN_MAPPINGS['DK-DK2'],
'DE': ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
'IE': '10Y1001A1001A59C',
'LU': ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
}
ENTSOE_UNITS_TO_ZONE = {
# DK-DK1
'Anholt': 'DK-DK1',
'Esbjergvaerket 3': 'DK-DK1',
'Fynsvaerket 7': 'DK-DK1',
'Horns Rev A': 'DK-DK1',
'Horns Rev B': 'DK-DK1',
'Nordjyllandsvaerket 3': 'DK-DK1',
'Silkeborgvaerket': 'DK-DK1',
'Skaerbaekvaerket 3': 'DK-DK1',
'Studstrupvaerket 3': 'DK-DK1',
'Studstrupvaerket 4': 'DK-DK1',
# DK-DK2
'Amagervaerket 3': 'DK-DK2',
'Asnaesvaerket 2': 'DK-DK2',
'Asnaesvaerket 5': 'DK-DK2',
'Avedoerevaerket 1': 'DK-DK2',
'Avedoerevaerket 2': 'DK-DK2',
'Kyndbyvaerket 21': 'DK-DK2',
'Kyndbyvaerket 22': 'DK-DK2',
'Roedsand 1': 'DK-DK2',
'Roedsand 2': 'DK-DK2',
# FI
'Alholmens B2': 'FI',
'Haapavesi B1': 'FI',
'Kaukaan Voima G10': 'FI',
'Keljonlahti B1': 'FI',
'Loviisa 1 G11': 'FI',
'Loviisa 1 G12': 'FI',
'Loviisa 2 G21': 'FI',
'Loviisa 2 G22': 'FI',
'Olkiluoto 1 B1': 'FI',
'Olkiluoto 2 B2': 'FI',
'Toppila B2': 'FI',
# SE
'Bastusel G1': 'SE',
'Forsmark block 1 G11': 'SE',
'Forsmark block 1 G12': 'SE',
'Forsmark block 2 G21': 'SE',
'Forsmark block 2 G22': 'SE',
'Forsmark block 3 G31': 'SE',
'Gallejaur G1': 'SE',
'Gallejaur G2': 'SE',
'Gasturbiner Halmstad G12': 'SE',
'Harsprånget G1': 'SE',
'Harsprånget G2': 'SE',
'Harsprånget G4': 'SE',
'Harsprånget G5': 'SE',
'KVV Västerås G3': 'SE',
'KVV1 Värtaverket': 'SE',
'KVV6 Värtaverket ': 'SE',
'KVV8 Värtaverket': 'SE',
'Karlshamn G1': 'SE',
'Karlshamn G2': 'SE',
'Karlshamn G3': 'SE',
'Letsi G1': 'SE',
'Letsi G2': 'SE',
'Letsi G3': 'SE',
'Ligga G3': 'SE',
'Messaure G1': 'SE',
'Messaure G2': 'SE',
'Messaure G3': 'SE',
'Oskarshamn G1Ö+G1V': 'SE',
'Oskarshamn G3': 'SE',
'Porjus G11': 'SE',
'Porjus G12': 'SE',
'Porsi G3': 'SE',
'Ringhals block 1 G11': 'SE',
'Ringhals block 1 G12': 'SE',
'Ringhals block 2 G21': 'SE',
'Ringhals block 2 G22': 'SE',
'Ringhals block 3 G31': 'SE',
'Ringhals block 3 G32': 'SE',
'Ringhals block 4 G41': 'SE',
'Ringhals block 4 G42': 'SE',
'Ritsem G1': 'SE',
'Rya KVV': 'SE',
'Seitevare G1': 'SE',
'Stalon G1': 'SE',
'Stenungsund B3': 'SE',
'Stenungsund B4': 'SE',
'Stornorrfors G1': 'SE',
'Stornorrfors G2': 'SE',
'Stornorrfors G3': 'SE',
'Stornorrfors G4': 'SE',
'Trängslet G1': 'SE',
'Trängslet G2': 'SE',
'Trängslet G3': 'SE',
'Uppsala KVV': 'SE',
'Vietas G1': 'SE',
'Vietas G2': 'SE',
'Åbyverket Örebro': 'SE',
}
VALIDATIONS = {
# This is a list of criteria to ensure validity of data,
# used in validate_production()
# Note that "required" means data is present in ENTSOE.
# It will still work if data is present but 0.
# "expected_range" and "floor" only count production and storage
# - not exchanges!
'AT': {
'required': ['hydro'],
},
'BE': {
'required': ['gas', 'nuclear'],
'expected_range': (3000, 25000),
},
'BG': {
'required': ['coal', 'nuclear', 'hydro'],
'expected_range': (2000, 20000),
},
'CH': {
'required': ['hydro', 'nuclear'],
'expected_range': (2000, 25000),
},
'CZ': {
# usual load is in 7-12 GW range
'required': ['coal', 'nuclear'],
'expected_range': (3000, 25000),
},
'DE': {
# Germany sometimes has problems with categories of generation missing from ENTSOE.
# Normally there is constant production of a few GW from hydro and biomass
# and when those are missing this can indicate that others are missing as well.
# We have also never seen unknown being 0.
# Usual load is in 30 to 80 GW range.
'required': ['coal', 'gas', 'nuclear', 'wind',
'biomass', 'hydro', 'unknown', 'solar'],
'expected_range': (20000, 100000),
},
'EE': {
'required': ['coal'],
},
'ES': {
'required': ['coal', 'nuclear'],
'expected_range': (10000, 80000),
},
'FI': {
'required': ['coal', 'nuclear', 'hydro', 'biomass'],
'expected_range': (2000, 20000),
},
'GB': {
# usual load is in 15 to 50 GW range
'required': ['coal', 'gas', 'nuclear'],
'expected_range': (10000, 80000),
},
'GR': {
'required': ['coal', 'gas'],
'expected_range': (2000, 20000),
},
'HU': {
'required': ['coal', 'nuclear'],
},
'IE': {
'required': ['coal'],
'expected_range': (1000, 15000),
},
'IT': {
'required': ['coal'],
'expected_range': (5000, 50000),
},
'PL': {
# usual load is in 10-20 GW range and coal is always present
'required': ['coal'],
'expected_range': (5000, 35000),
},
'PT': {
'required': ['coal', 'gas'],
'expected_range': (1000, 20000),
},
'RO': {
'required': ['coal', 'nuclear', 'hydro'],
'expected_range': (2000, 25000),
},
'RS': {
'required': ['coal'],
},
'SI': {
# own total generation capacity is around 4 GW
'required': ['nuclear'],
'expected_range': (1000, 5000),
},
'SK': {
'required': ['nuclear']
},
}
class QueryError(Exception):
"""Raised when a query to ENTSOE returns no matching data."""
def closest_in_time_key(x, target_datetime, datetime_key='datetime'):
target_datetime = arrow.get(target_datetime)
return np.abs((x[datetime_key] - target_datetime).seconds)
def check_response(response, function_name):
"""
Searches for an error message in response if the query to ENTSOE fails.
Returns a QueryError message containing function name and reason for failure.
"""
soup = BeautifulSoup(response.text, 'html.parser')
text = soup.find_all('text')
if not response.ok:
if len(text):
error_text = soup.find_all('text')[0].prettify()
if 'No matching data found' in error_text:
return
raise QueryError('{0} failed in ENTSOE.py. Reason: {1}'.format(function_name, error_text))
else:
raise QueryError('{0} failed in ENTSOE.py. Reason: {1}'.format(function_name, response.text))
def query_ENTSOE(session, params, target_datetime=None, span=(-48, 24)):
"""
Makes a standard query to the ENTSOE API with a modifiable set of parameters.
Allows an existing session to be passed.
Raises an exception if no API token is found.
Returns a request object.
"""
if target_datetime is None:
target_datetime = arrow.utcnow()
else:
# make sure we have an arrow object
target_datetime = arrow.get(target_datetime)
params['periodStart'] = target_datetime.shift(hours=span[0]).format('YYYYMMDDHH00')
params['periodEnd'] = target_datetime.shift(hours=span[1]).format('YYYYMMDDHH00')
# Due to rate limiting, we need to spread our requests across different tokens
tokens = get_token('ENTSOE_TOKEN').split(',')
params['securityToken'] = np.random.choice(tokens)
return session.get(ENTSOE_ENDPOINT, params=params)
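# Illustrative usage sketch (not part of the original parser): querying realised
# generation per type for France. Assumes a valid token is available through
# get_token('ENTSOE_TOKEN'); the default span covers 48 hours back and 24 hours ahead.
def _example_query_entsoe():
    session = requests.session()
    params = {
        'documentType': 'A75',  # Actual generation per type
        'processType': 'A16',   # Realised
        'in_Domain': ENTSOE_DOMAIN_MAPPINGS['FR'],
    }
    response = query_ENTSOE(session, params)
    return response.text if response.ok else None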
def query_consumption(domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A65',
'processType': 'A16',
'outBiddingZone_Domain': domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_consumption.__name__)
def query_production(in_domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A75',
'processType': 'A16', # Realised
'in_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime, span=(-48, 0))
if response.ok:
return response.text
else:
check_response(response, query_production.__name__)
def query_production_per_units(psr_type, domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A73',
'processType': 'A16',
'psrType': psr_type,
'in_Domain': domain,
}
# Note: ENTSOE only supports 1d queries for this type
response = query_ENTSOE(session, params, target_datetime, span=(-24, 0))
if response.ok:
return response.text
else:
check_response(response, query_production_per_units.__name__)
def query_exchange(in_domain, out_domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A11',
'in_Domain': in_domain,
'out_Domain': out_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_exchange.__name__)
def query_exchange_forecast(in_domain, out_domain, session, target_datetime=None):
"""
Gets exchange forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
params = {
'documentType': 'A09', # Finalised schedule
'in_Domain': in_domain,
'out_Domain': out_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_exchange_forecast.__name__)
def query_price(domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A44',
'in_Domain': domain,
'out_Domain': domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_price.__name__)
def query_generation_forecast(in_domain, session, target_datetime=None):
"""
Gets generation forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
# Note: this does not give a breakdown of the production
params = {
'documentType': 'A71', # Generation Forecast
'processType': 'A01', # Day ahead
'in_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_generation_forecast.__name__)
def query_consumption_forecast(in_domain, session, target_datetime=None):
"""
Gets consumption forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
params = {
'documentType': 'A65', # Load Forecast
'processType': 'A01',
'outBiddingZone_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_consumption_forecast.__name__)
def query_wind_solar_production_forecast(in_domain, session, target_datetime=None):
"""
Gets wind and solar production forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
params = {
'documentType': 'A69', # Wind and solar forecast
'processType': 'A01',
'in_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_wind_solar_production_forecast.__name__)
def datetime_from_position(start, position, resolution):
"""Finds time granularity of data."""
m = re.search(r'PT(\d+)([M])', resolution)
if m:
digits = int(m.group(1))
scale = m.group(2)
if scale == 'M':
return start.shift(minutes=(position - 1) * digits)
raise NotImplementedError('Could not recognise resolution %s' % resolution)
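# Illustrative sketch (not part of the original parser): how point positions are
# turned into timestamps. ENTSOE resolutions are ISO 8601 durations such as
# 'PT15M' or 'PT60M'; position 1 corresponds to the period start.
def _example_datetime_from_position():
    start = arrow.get('2021-01-01T00:00:00Z')
    first = datetime_from_position(start, 1, 'PT60M')   # 2021-01-01T00:00
    fifth = datetime_from_position(start, 5, 'PT15M')   # 2021-01-01T01:00
    return first, fifth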
def parse_scalar(xml_text, only_inBiddingZone_Domain=False, only_outBiddingZone_Domain=False):
"""Returns a tuple containing two lists."""
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
values = []
datetimes = []
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
if only_inBiddingZone_Domain:
if not len(timeseries.find_all('inBiddingZone_Domain.mRID'.lower())):
continue
elif only_outBiddingZone_Domain:
if not len(timeseries.find_all('outBiddingZone_Domain.mRID'.lower())):
continue
for entry in timeseries.find_all('point'):
position = int(entry.find_all('position')[0].contents[0])
value = float(entry.find_all('quantity')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
values.append(value)
datetimes.append(datetime)
return values, datetimes
def parse_production(xml_text):
"""Returns a tuple containing two lists."""
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
productions = []
datetimes = []
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
is_production = len(timeseries.find_all('inBiddingZone_Domain.mRID'.lower())) > 0
psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
try:
i = datetimes.index(datetime)
if is_production:
productions[i][psr_type] += quantity
elif psr_type in ENTSOE_STORAGE_PARAMETERS:
# Only include consumption if it's for storage. In other cases
# it is power plant self-consumption which should be ignored.
productions[i][psr_type] -= quantity
except ValueError: # Not in list
datetimes.append(datetime)
productions.append(defaultdict(lambda: 0))
productions[-1][psr_type] = quantity if is_production else -1 * quantity
return productions, datetimes
def parse_self_consumption(xml_text):
"""
Parses the XML text and returns a dict of datetimes to the total self-consumption
value from all sources.
Self-consumption is the electricity used by a generation source.
This is defined as any consumption source (i.e. outBiddingZone_Domain.mRID)
that is not storage, e.g. consumption for B04 (Fossil Gas) is counted as
self-consumption, but consumption for B10 (Hydro Pumped Storage) is not.
In most cases, total self-consumption is reported by ENTSOE as 0, therefore the returned
dict only includes datetimes where the value > 0.
"""
if not xml_text: return None
soup = BeautifulSoup(xml_text, 'html.parser')
res = {}
for timeseries in soup.find_all('timeseries'):
is_consumption = len(timeseries.find_all('outBiddingZone_Domain.mRID'.lower())) > 0
if not is_consumption: continue
psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]
if psr_type in ENTSOE_STORAGE_PARAMETERS: continue
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
if quantity == 0: continue
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
res[datetime] = res[datetime] + quantity if datetime in res else quantity
return res
def parse_production_per_units(xml_text):
"""Returns a dict indexed by the (datetime, unit_key) key"""
values = {}
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
is_production = len(timeseries.find_all('inBiddingZone_Domain.mRID'.lower())) > 0
psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]
unit_key = timeseries.find_all('mktpsrtype')[0].find_all(
'powersystemresources')[0].find_all('mrid')[0].contents[0]
unit_name = timeseries.find_all('mktpsrtype')[0].find_all(
'powersystemresources')[0].find_all('name')[0].contents[0]
if not is_production: continue
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
key = (unit_key, datetime)
if key in values:
if is_production:
values[key]['production'] += quantity
else:
values[key]['production'] -= quantity
else:
values[key] = {
'datetime': datetime,
'production': quantity,
'productionType': ENTSOE_PARAMETER_BY_GROUP[psr_type],
'unitKey': unit_key,
'unitName': unit_name
}
return values.values()
def parse_exchange(xml_text, is_import, quantities=None, datetimes=None):
"""Returns a tuple containing two lists."""
if not xml_text:
return None
quantities = quantities or []
datetimes = datetimes or []
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
# Only use contract_marketagreement.type == A05 (Total) to avoid double counting some columns
if timeseries.find_all('contract_marketagreement.type') and \
timeseries.find_all('contract_marketagreement.type')[0].contents[0] != 'A05':
continue
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
if not is_import:
quantity *= -1
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
# Find out whether or not we should update the net production
try:
i = datetimes.index(datetime)
quantities[i] += quantity
except ValueError: # Not in list
quantities.append(quantity)
datetimes.append(datetime)
return quantities, datetimes
def parse_price(xml_text):
"""Returns a tuple containing three lists."""
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
prices = []
currencies = []
datetimes = []
for timeseries in soup.find_all('timeseries'):
currency = timeseries.find_all('currency_unit.name')[0].contents[0]
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
for entry in timeseries.find_all('point'):
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
prices.append(float(entry.find_all('price.amount')[0].contents[0]))
datetimes.append(datetime)
currencies.append(currency)
return prices, currencies, datetimes
def validate_production(datapoint, logger):
"""
Production data can sometimes be available but clearly wrong.
The most common occurrence is when the production total is very low and
main generation types are missing. In reality a country's electrical grid
could not function in this scenario.
This function checks datapoints for a selection of countries and returns
False if invalid and True otherwise.
"""
zone_key = datapoint['zoneKey']
validation_criteria = VALIDATIONS.get(zone_key, {})
if validation_criteria:
return validate(datapoint, logger=logger, **validation_criteria)
if zone_key.startswith('DK-'):
return validate(datapoint, logger=logger, required=['coal', 'solar', 'wind'])
if zone_key.startswith('NO-'):
return validate(datapoint, logger=logger, required=['hydro'])
return True
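# Illustrative sketch (not part of the original parser): how the VALIDATIONS
# table above is applied. For Poland the datapoint must report coal, and total
# production plus storage must fall within the 5-35 GW expected range.
def _example_validate_production(logger=logging.getLogger(__name__)):
    datapoint = {
        'zoneKey': 'PL',
        'datetime': arrow.utcnow().datetime,
        'production': {'coal': 12000.0, 'wind': 1500.0},
        'storage': {},
        'source': 'entsoe.eu',
    }
    # Truthy when the datapoint passes the criteria, falsy otherwise.
    return validate_production(datapoint, logger)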
def get_wind(values):
if 'Wind Onshore' in values or 'Wind Offshore' in values:
return values.get('Wind Onshore', 0) + values.get('Wind Offshore', 0)
def fetch_consumption(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""Gets consumption for a specified zone, returns a dictionary."""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab consumption
parsed = parse_scalar(
query_consumption(domain, session, target_datetime=target_datetime),
only_outBiddingZone_Domain=True)
if parsed:
quantities, datetimes = parsed
# Add power plant self-consumption data. This is reported as part of the
# production data by ENTSOE.
# self_consumption is a dict of datetimes to the total self-consumption value
# from all sources.
# Only datetimes where the value > 0 are included.
self_consumption = parse_self_consumption(
query_production(domain, session,
target_datetime=target_datetime))
for dt, value in self_consumption.items():
try:
i = datetimes.index(dt)
except ValueError:
logger.warning(
f'No corresponding consumption value found for self-consumption at {dt}')
continue
quantities[i] += value
# if a target_datetime was requested, we return everything
if target_datetime:
return [{
'zoneKey': zone_key,
'datetime': dt.datetime,
'consumption': quantity,
'source': 'entsoe.eu'
} for dt, quantity in zip(datetimes, quantities)]
# else we keep the last stored value
# Note, this may not include self-consumption data as sometimes consumption
# data is available for a given TZ a few minutes before production data is.
dt, quantity = datetimes[-1].datetime, quantities[-1]
if dt not in self_consumption:
logger.warning(f'Self-consumption data not yet available for {zone_key} at {dt}')
data = {
'zoneKey': zone_key,
'datetime': dt,
'consumption': quantity,
'source': 'entsoe.eu'
}
return data
def fetch_production(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets values and corresponding datetimes for all production types in the
specified zone. Removes any values that are in the future or don't have
a datetime associated with them.
Returns a list of dictionaries that have been validated.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab production
parsed = parse_production(
query_production(domain, session,
target_datetime=target_datetime))
if not parsed:
return None
productions, production_dates = parsed
data = []
for i in range(len(production_dates)):
production_values = {k: v for k, v in productions[i].items()}
production_date = production_dates[i]
production_types = {'production': {}, 'storage': {}}
for key in ['production', 'storage']:
parameter_groups = ENTSOE_PARAMETER_GROUPS[key]
multiplier = -1 if key == 'storage' else 1
for fuel, groups in parameter_groups.items():
has_value = any([production_values.get(grp) is not None for grp in groups])
if has_value:
value = sum([production_values.get(grp, 0) for grp in groups])
value *= multiplier
else:
value = None
production_types[key][fuel] = value
data.append({
'zoneKey': zone_key,
'datetime': production_date.datetime,
'production': production_types['production'],
'storage': {
'hydro': production_types['storage']['hydro storage'],
},
'source': 'entsoe.eu'
})
for d in data:
for k, v in d['production'].items():
if v is None: continue
if v < 0 and v > -50:
# Set small negative values to 0
logger.warning('Setting small value of %s (%s) to 0.' % (k, v),
extra={'key': zone_key})
d['production'][k] = 0
return list(filter(lambda x: validate_production(x, logger), data))
ZONE_KEY_AGGREGATES = {
'IT-SO': ['IT-CA', 'IT-SO'],
}
# TODO: generalize and move to lib.utils so other parsers can reuse it. (it's
# currently used by US_SEC.)
def merge_production_outputs(parser_outputs, merge_zone_key, merge_source=None):
"""
Given multiple parser outputs, sum the production and storage
of corresponding datetimes to create a production list.
This will drop rows where the datetime is missing in at least a
parser_output.
"""
if len(parser_outputs) == 0:
return []
if merge_source is None:
merge_source = parser_outputs[0][0]['source']
prod_and_storage_dfs = [
pd.DataFrame(output).set_index('datetime')[['production', 'storage']]
for output in parser_outputs
]
to_return = prod_and_storage_dfs[0]
for prod_and_storage in prod_and_storage_dfs[1:]:
# `inner` join drops rows where one of the production is missing
to_return = to_return.join(
prod_and_storage, how='inner', rsuffix='_other')
to_return['production'] = to_return.apply(
lambda row: sum_production_dicts(row.production,
row.production_other),
axis=1)
to_return['storage'] = to_return.apply(
lambda row: sum_production_dicts(row.storage, row.storage_other),
axis=1)
to_return = to_return[['production', 'storage']]
return [{
'datetime': dt.to_pydatetime(),
'production': row.production,
'storage': row.storage,
'source': merge_source,
'zoneKey': merge_zone_key,
} for dt, row in to_return.iterrows()]
def fetch_production_aggregate(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
if zone_key not in ZONE_KEY_AGGREGATES:
raise ValueError('Unknown aggregate key %s' % zone_key)
return merge_production_outputs(
[fetch_production(k, session, target_datetime, logger)
for k in ZONE_KEY_AGGREGATES[zone_key]],
zone_key)
def fetch_production_per_units(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Returns all production units and their production values as a list of dictionaries.
"""
if not session:
session = requests.session()
domain = ENTSOE_EIC_MAPPING[zone_key]
data = []
# Iterate over all psr types
for k in ENTSOE_PARAMETER_DESC.keys():
try:
values = parse_production_per_units(
query_production_per_units(k, domain, session, target_datetime)) or []
for v in values:
if not v:
continue
v['datetime'] = v['datetime'].datetime
v['source'] = 'entsoe.eu'
if not v['unitName'] in ENTSOE_UNITS_TO_ZONE:
logger.warning('Unknown unit %s with id %s' % (v['unitName'], v['unitKey']))
else:
v['zoneKey'] = ENTSOE_UNITS_TO_ZONE[v['unitName']]
if v['zoneKey'] == zone_key:
data.append(v)
except QueryError:
pass
return data
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets exchange status between two specified zones.
Removes any datapoints that are in the future.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
sorted_zone_keys = sorted([zone_key1, zone_key2])
key = '->'.join(sorted_zone_keys)
if key in ENTSOE_EXCHANGE_DOMAIN_OVERRIDE:
domain1, domain2 = ENTSOE_EXCHANGE_DOMAIN_OVERRIDE[key]
else:
domain1 = ENTSOE_DOMAIN_MAPPINGS[zone_key1]
domain2 = ENTSOE_DOMAIN_MAPPINGS[zone_key2]
# Create a hashmap with key (datetime)
exchange_hashmap = {}
# Grab exchange
# Import
parsed = parse_exchange(
query_exchange(domain1, domain2, session, target_datetime=target_datetime),
is_import=True)
if parsed:
# Export
parsed = parse_exchange(
xml_text=query_exchange(domain2, domain1, session, target_datetime=target_datetime),
is_import=False, quantities=parsed[0], datetimes=parsed[1])
if parsed:
quantities, datetimes = parsed
for i in range(len(quantities)):
exchange_hashmap[datetimes[i]] = quantities[i]
# Remove all dates in the future
exchange_dates = sorted(set(exchange_hashmap.keys()), reverse=True)
exchange_dates = list(filter(lambda x: x <= arrow.now(), exchange_dates))
if not len(exchange_dates):
return None
data = []
for exchange_date in exchange_dates:
net_flow = exchange_hashmap[exchange_date]
data.append({
'sortedZoneKeys': key,
'datetime': exchange_date.datetime,
'netFlow': net_flow if zone_key1 == sorted_zone_keys[0] else -1 * net_flow,
'source': 'entsoe.eu'
})
return data
def fetch_exchange_forecast(zone_key1, zone_key2, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets exchange forecast between two specified zones.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
sorted_zone_keys = sorted([zone_key1, zone_key2])
key = '->'.join(sorted_zone_keys)
if key in ENTSOE_EXCHANGE_DOMAIN_OVERRIDE:
domain1, domain2 = ENTSOE_EXCHANGE_DOMAIN_OVERRIDE[key]
else:
domain1 = ENTSOE_DOMAIN_MAPPINGS[zone_key1]
domain2 = ENTSOE_DOMAIN_MAPPINGS[zone_key2]
# Create a hashmap with key (datetime)
exchange_hashmap = {}
# Grab exchange
# Import
parsed = parse_exchange(
query_exchange_forecast(domain1, domain2, session, target_datetime=target_datetime),
is_import=True)
if parsed:
# Export
parsed = parse_exchange(
xml_text=query_exchange_forecast(domain2, domain1, session,
target_datetime=target_datetime),
is_import=False, quantities=parsed[0], datetimes=parsed[1])
if parsed:
quantities, datetimes = parsed
for i in range(len(quantities)):
exchange_hashmap[datetimes[i]] = quantities[i]
# Sort the datetimes; unlike fetch_exchange, forecast values may legitimately lie in the future
exchange_dates = list(sorted(set(exchange_hashmap.keys()), reverse=True))
if not len(exchange_dates):
return None
data = []
for exchange_date in exchange_dates:
netFlow = exchange_hashmap[exchange_date]
data.append({
'sortedZoneKeys': key,
'datetime': exchange_date.datetime,
'netFlow': netFlow if zone_key1 == sorted_zone_keys[0] else -1 * netFlow,
'source': 'entsoe.eu'
})
return data
def fetch_price(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets day-ahead price for specified zone.
Returns a list of dictionaries.
"""
# Note: This is day-ahead prices
if not session:
session = requests.session()
if zone_key in ENTSOE_PRICE_DOMAIN_OVERRIDE:
domain = ENTSOE_PRICE_DOMAIN_OVERRIDE[zone_key]
else:
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab day-ahead prices
parsed = parse_price(query_price(domain, session, target_datetime=target_datetime))
if parsed:
data = []
prices, currencies, datetimes = parsed
for i in range(len(prices)):
data.append({
'zoneKey': zone_key,
'datetime': datetimes[i].datetime,
'currency': currencies[i],
'price': prices[i],
'source': 'entsoe.eu'
})
return data
def fetch_generation_forecast(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets generation forecast for specified zone.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab generation forecast
parsed = parse_scalar(query_generation_forecast(
domain, session, target_datetime=target_datetime), only_inBiddingZone_Domain=True)
if parsed:
data = []
values, datetimes = parsed
for i in range(len(values)):
data.append({
'zoneKey': zone_key,
'datetime': datetimes[i].datetime,
'value': values[i],
'source': 'entsoe.eu'
})
return data
def fetch_consumption_forecast(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets consumption forecast for specified zone.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab consumption forecast
parsed = parse_scalar(query_consumption_forecast(
domain, session, target_datetime=target_datetime), only_outBiddingZone_Domain=True)
if parsed:
data = []
values, datetimes = parsed
for i in range(len(values)):
data.append({
'zoneKey': zone_key,
'datetime': datetimes[i].datetime,
'value': values[i],
'source': 'entsoe.eu'
})
return data
def fetch_wind_solar_forecasts(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets values and corresponding datetimes for all production types in the
specified zone. Removes any values that are in the future or don't have
a datetime associated with them.
Returns a list of dictionaries that have been validated.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab production
parsed = parse_production(
query_wind_solar_production_forecast(domain, session,
target_datetime=target_datetime))
if not parsed:
return None
productions, production_dates = parsed
data = []
for i in range(len(production_dates)):
production_values = {ENTSOE_PARAMETER_DESC[k]: v for k, v in
productions[i].items()}
production_date = production_dates[i]
data.append({
'zoneKey': zone_key,
'datetime': production_date.datetime,
'production': {
'solar': production_values.get('Solar', None),
'wind': get_wind(production_values),
},
'source': 'entsoe.eu'
})
return data
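# Illustrative usage sketch (not part of the original parser): the fetch_*
# functions above are normally driven by the surrounding application, but they
# can be called directly, e.g. to pull French day-ahead prices and realised production.
def _example_fetch():
    session = requests.session()
    prices = fetch_price('FR', session=session)
    production = fetch_production('FR', session=session)
    return prices, production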
|
py | 1a350b3e39f97c8bc5a56fcdaaa70c1a6ba31461 | """Support for ADS covers."""
import logging
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import (
CONF_ADS_VAR,
CONF_ADS_VAR_POSITION,
DATA_ADS,
STATE_KEY_POSITION,
STATE_KEY_STATE,
AdsEntity,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ADS Cover"
CONF_ADS_VAR_SET_POS = "adsvar_set_position"
CONF_ADS_VAR_OPEN = "adsvar_open"
CONF_ADS_VAR_CLOSE = "adsvar_close"
CONF_ADS_VAR_STOP = "adsvar_stop"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_ADS_VAR): cv.string,
vol.Optional(CONF_ADS_VAR_POSITION): cv.string,
vol.Optional(CONF_ADS_VAR_SET_POS): cv.string,
vol.Optional(CONF_ADS_VAR_CLOSE): cv.string,
vol.Optional(CONF_ADS_VAR_OPEN): cv.string,
vol.Optional(CONF_ADS_VAR_STOP): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
)
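# Illustrative sketch (assumption, not taken from the Home Assistant docs): the
# kind of options setup_platform() below reads from a validated config, written
# with the CONF_ constants above. The ADS symbol names are made up; CONF_ADS_VAR
# and CONF_ADS_VAR_POSITION come from the integration package and are optional here.
_EXAMPLE_COVER_CONFIG = {
    CONF_NAME: "Garage door",
    CONF_ADS_VAR_OPEN: "GVL.bOpenGarage",
    CONF_ADS_VAR_CLOSE: "GVL.bCloseGarage",
    CONF_ADS_VAR_STOP: "GVL.bStopGarage",
}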
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the cover platform for ADS."""
ads_hub = hass.data[DATA_ADS]
ads_var_is_closed = config.get(CONF_ADS_VAR)
ads_var_position = config.get(CONF_ADS_VAR_POSITION)
ads_var_pos_set = config.get(CONF_ADS_VAR_SET_POS)
ads_var_open = config.get(CONF_ADS_VAR_OPEN)
ads_var_close = config.get(CONF_ADS_VAR_CLOSE)
ads_var_stop = config.get(CONF_ADS_VAR_STOP)
name = config[CONF_NAME]
device_class = config.get(CONF_DEVICE_CLASS)
add_entities(
[
AdsCover(
ads_hub,
ads_var_is_closed,
ads_var_position,
ads_var_pos_set,
ads_var_open,
ads_var_close,
ads_var_stop,
name,
device_class,
)
]
)
class AdsCover(AdsEntity, CoverEntity):
"""Representation of ADS cover."""
def __init__(
self,
ads_hub,
ads_var_is_closed,
ads_var_position,
ads_var_pos_set,
ads_var_open,
ads_var_close,
ads_var_stop,
name,
device_class,
):
"""Initialize AdsCover entity."""
super().__init__(ads_hub, name, ads_var_is_closed)
if self._ads_var is None:
if ads_var_position is not None:
self._unique_id = ads_var_position
elif ads_var_pos_set is not None:
self._unique_id = ads_var_pos_set
elif ads_var_open is not None:
self._unique_id = ads_var_open
self._state_dict[STATE_KEY_POSITION] = None
self._ads_var_position = ads_var_position
self._ads_var_pos_set = ads_var_pos_set
self._ads_var_open = ads_var_open
self._ads_var_close = ads_var_close
self._ads_var_stop = ads_var_stop
self._device_class = device_class
async def async_added_to_hass(self):
"""Register device notification."""
if self._ads_var is not None:
await self.async_initialize_device(
self._ads_var, self._ads_hub.PLCTYPE_BOOL
)
if self._ads_var_position is not None:
await self.async_initialize_device(
self._ads_var_position, self._ads_hub.PLCTYPE_BYTE, STATE_KEY_POSITION
)
@property
def device_class(self):
"""Return the class of this cover."""
return self._device_class
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._ads_var is not None:
return self._state_dict[STATE_KEY_STATE]
if self._ads_var_position is not None:
return self._state_dict[STATE_KEY_POSITION] == 0
return None
@property
def current_cover_position(self):
"""Return current position of cover."""
return self._state_dict[STATE_KEY_POSITION]
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE
if self._ads_var_stop is not None:
supported_features |= SUPPORT_STOP
if self._ads_var_pos_set is not None:
supported_features |= SUPPORT_SET_POSITION
return supported_features
def stop_cover(self, **kwargs):
"""Fire the stop action."""
if self._ads_var_stop:
self._ads_hub.write_by_name(
self._ads_var_stop, True, self._ads_hub.PLCTYPE_BOOL
)
def set_cover_position(self, **kwargs):
"""Set cover position."""
position = kwargs[ATTR_POSITION]
if self._ads_var_pos_set is not None:
self._ads_hub.write_by_name(
self._ads_var_pos_set, position, self._ads_hub.PLCTYPE_BYTE
)
def open_cover(self, **kwargs):
"""Move the cover up."""
if self._ads_var_open is not None:
self._ads_hub.write_by_name(
self._ads_var_open, True, self._ads_hub.PLCTYPE_BOOL
)
elif self._ads_var_pos_set is not None:
self.set_cover_position(position=100)
def close_cover(self, **kwargs):
"""Move the cover down."""
if self._ads_var_close is not None:
self._ads_hub.write_by_name(
self._ads_var_close, True, self._ads_hub.PLCTYPE_BOOL
)
elif self._ads_var_pos_set is not None:
self.set_cover_position(position=0)
@property
def available(self):
"""Return False if state has not been updated yet."""
if self._ads_var is not None or self._ads_var_position is not None:
return (
self._state_dict[STATE_KEY_STATE] is not None
or self._state_dict[STATE_KEY_POSITION] is not None
)
return True
|
py | 1a350d1db9861a06ab6fb6541be86b5b16227f26 | """DataUpdateCoordinator for the Yale integration."""
from __future__ import annotations
from datetime import timedelta
from typing import Any
from yalesmartalarmclient.client import YaleSmartAlarmClient
from yalesmartalarmclient.exceptions import AuthenticationError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN, LOGGER, YALE_BASE_ERRORS
class YaleDataUpdateCoordinator(DataUpdateCoordinator):
"""A Yale Data Update Coordinator."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Initialize the Yale hub."""
self.entry = entry
self.yale: YaleSmartAlarmClient | None = None
super().__init__(
hass,
LOGGER,
name=DOMAIN,
update_interval=timedelta(seconds=DEFAULT_SCAN_INTERVAL),
)
async def _async_update_data(self) -> dict[str, Any]:
"""Fetch data from Yale."""
updates = await self.hass.async_add_executor_job(self.get_updates)
locks = []
door_windows = []
for device in updates["cycle"]["device_status"]:
state = device["status1"]
if device["type"] == "device_type.door_lock":
lock_status_str = device["minigw_lock_status"]
lock_status = int(str(lock_status_str or 0), 16)
closed = (lock_status & 16) == 16
locked = (lock_status & 1) == 1
if not lock_status and "device_status.lock" in state:
device["_state"] = "locked"
device["_state2"] = "unknown"
locks.append(device)
continue
if not lock_status and "device_status.unlock" in state:
device["_state"] = "unlocked"
device["_state2"] = "unknown"
locks.append(device)
continue
if (
lock_status
and (
"device_status.lock" in state or "device_status.unlock" in state
)
and closed
and locked
):
device["_state"] = "locked"
device["_state2"] = "closed"
locks.append(device)
continue
if (
lock_status
and (
"device_status.lock" in state or "device_status.unlock" in state
)
and closed
and not locked
):
device["_state"] = "unlocked"
device["_state2"] = "closed"
locks.append(device)
continue
if (
lock_status
and (
"device_status.lock" in state or "device_status.unlock" in state
)
and not closed
):
device["_state"] = "unlocked"
device["_state2"] = "open"
locks.append(device)
continue
device["_state"] = "unavailable"
locks.append(device)
continue
if device["type"] == "device_type.door_contact":
if "device_status.dc_close" in state:
device["_state"] = "closed"
door_windows.append(device)
continue
if "device_status.dc_open" in state:
device["_state"] = "open"
door_windows.append(device)
continue
device["_state"] = "unavailable"
door_windows.append(device)
continue
_sensor_map = {
contact["address"]: contact["_state"] for contact in door_windows
}
_lock_map = {lock["address"]: lock["_state"] for lock in locks}
return {
"alarm": updates["arm_status"],
"locks": locks,
"door_windows": door_windows,
"status": updates["status"],
"online": updates["online"],
"sensor_map": _sensor_map,
"lock_map": _lock_map,
"panel_info": updates["panel_info"],
}
def get_updates(self) -> dict[str, Any]:
"""Fetch data from Yale."""
if self.yale is None:
try:
self.yale = YaleSmartAlarmClient(
self.entry.data[CONF_USERNAME], self.entry.data[CONF_PASSWORD]
)
except AuthenticationError as error:
raise ConfigEntryAuthFailed from error
except YALE_BASE_ERRORS as error:
raise UpdateFailed from error
try:
arm_status = self.yale.get_armed_status()
data = self.yale.get_all()
cycle = data["CYCLE"]
status = data["STATUS"]
online = data["ONLINE"]
panel_info = data["PANEL INFO"]
except AuthenticationError as error:
raise ConfigEntryAuthFailed from error
except YALE_BASE_ERRORS as error:
raise UpdateFailed from error
return {
"arm_status": arm_status,
"cycle": cycle,
"status": status,
"online": online,
"panel_info": panel_info,
}
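# Illustrative sketch (not part of the original integration): how the coordinator
# above decodes Yale's minigw_lock_status field. The value is a hex string; as the
# checks in _async_update_data suggest, bit 0 means "locked" and bit 4 (0x10)
# means "door closed".
def _example_decode_lock_status(lock_status_str: str) -> tuple[bool, bool]:
    lock_status = int(str(lock_status_str or 0), 16)
    closed = (lock_status & 16) == 16
    locked = (lock_status & 1) == 1
    return closed, locked
# _example_decode_lock_status("11") -> (True, True): closed and locked.
# _example_decode_lock_status("10") -> (True, False): closed but unlocked.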
|