filename | text
---|---|
the-stack_0_11552 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
_VARIABLE_OPS = {
"Assign",
"AssignAdd",
"AssignSub",
"Queue",
"ScatterAdd",
"ScatterSub",
"ScatterUpdate",
"TruncatedNormal",
"Variable",
}
def _is_variable_op(op):
"""Returns true if 'op' refers to a Variable node."""
return op in _VARIABLE_OPS
def set_cpu0(device_string):
"""Creates a new device string based on `device_string' but using /CPU:0.
If the device is already on /CPU:0, this is a no-op.
Args:
device_string: A device string.
Returns:
A device string.
"""
parsed_device = pydev.DeviceSpec.from_string(device_string)
parsed_device.device_type = "CPU"
parsed_device.device_index = 0
return parsed_device.to_string()
def must_run_on_cpu(node, pin_variables_on_cpu=False):
"""Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return True if node_def
represents a variable-related op.
Returns:
True if the given node must run on CPU, otherwise False.
"""
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, graph_pb2.NodeDef)
node_def = node
# If the op is a variable-related op, should we pin it on CPU?
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
# Constant operations producing a string or int32 must run on CPU.
if node_def.op == "Const":
# Get the value of the 'dtype' attr
dtype = node_def.attr["dtype"].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op == "DynamicStitch":
dtype = node_def.attr["T"].type
if dtype == dtypes.int32:
# DynamicStitch on GPU only works for int32 values.
return True
if node_def.op in ["Cast"]:
dtype = node_def.attr["SrcT"].type
if dtype == dtypes.int32:
# Cast on GPU does not work for int32 values.
return True
return False
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
edges = {} # Keyed by the dest node name.
name_to_node_map = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
node_seq = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node_map[n] = node
edges[n] = [_node_name(x) for x in node.input]
node_seq[n] = seq
seq += 1
for d in dest_nodes:
assert d in name_to_node_map, "%s is not in graph" % d
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = dest_nodes[:]
while next_to_visit:
n = next_to_visit[0]
del next_to_visit[0]
if n in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(n)
next_to_visit += edges[n]
nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node_map[n])])
return out
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
def convert_variables_to_constants(sess, input_graph_def, output_node_names):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
Returns:
GraphDef containing a simplified version of the original.
"""
found_variables = {}
variable_names = []
variable_dict_names = []
for node in input_graph_def.node:
if node.op == "Assign":
variable_name = node.input[0]
variable_dict_names.append(variable_name)
variable_names.append(variable_name + ":0")
if variable_names:
returned_variables = sess.run(variable_names)
else:
returned_variables = []
found_variables = dict(zip(variable_dict_names, returned_variables))
logging.info("Frozen %d variables." % len(returned_variables))
# This graph only includes the nodes needed to evaluate the output nodes, and
# removes unneeded nodes like those involved in saving and assignment.
inference_graph = extract_sub_graph(input_graph_def, output_node_names)
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in inference_graph.node:
output_node = graph_pb2.NodeDef()
if input_node.name in found_variables:
output_node.op = "Const"
output_node.name = input_node.name
dtype = input_node.attr["dtype"]
data = found_variables[input_node.name]
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(data,
dtype=dtype.type,
shape=data.shape)))
how_many_converted += 1
else:
output_node.CopyFrom(input_node)
output_graph_def.node.extend([output_node])
print("Converted %d variables to const ops." % how_many_converted)
return output_graph_def
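# --- Hedged usage sketch (added for illustration; not part of the original
# module). It exercises convert_variables_to_constants() as described in its
# docstring: freeze a trained graph so a single GraphDef carries the weights.
# The node name "output" and the file path are placeholders, not taken from
# this file.
def _example_freeze_graph(sess, output_path="frozen_graph.pb"):
  """Illustrative only: freeze `sess`'s graph and write it to disk."""
  frozen = convert_variables_to_constants(sess, sess.graph.as_graph_def(),
                                          ["output"])
  with open(output_path, "wb") as f:
    f.write(frozen.SerializeToString())
  return frozen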
|
the-stack_0_11553 | # Copyright (c) 2016, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, Tags
from .validators import boolean, integer, positive_integer
class SourceAuth(AWSProperty):
props = {
'Resource': (basestring, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'OAUTH'
]
auth_types = self.properties.get('Type')
if auth_types not in valid_types:
raise ValueError('SourceAuth Type: must be one of %s' %
','.join(valid_types))
class Artifacts(AWSProperty):
props = {
'EncryptionDisabled': (boolean, False),
'Location': (basestring, False),
'Name': (basestring, False),
'NamespaceType': (basestring, False),
'OverrideArtifactName': (boolean, False),
'Packaging': (basestring, False),
'Path': (basestring, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'CODEPIPELINE',
'NO_ARTIFACTS',
'S3',
]
artifact_type = self.properties.get('Type')
if artifact_type not in valid_types:
raise ValueError('Artifacts Type: must be one of %s' %
','.join(valid_types))
if artifact_type == 'S3':
for required_property in ['Name', 'Location']:
if not self.properties.get(required_property):
raise ValueError(
'Artifacts Type S3: requires %s to be set' %
required_property
)
class EnvironmentVariable(AWSProperty):
props = {
'Name': (basestring, True),
'Type': (basestring, False),
'Value': (basestring, True),
}
def validate(self):
if 'Type' in self.properties:
valid_types = [
'PARAMETER_STORE',
'PLAINTEXT',
]
env_type = self.properties.get('Type')
if env_type not in valid_types:
raise ValueError(
'EnvironmentVariable Type: must be one of %s' %
','.join(valid_types))
class Environment(AWSProperty):
props = {
'ComputeType': (basestring, True),
'EnvironmentVariables': ((list, [EnvironmentVariable]), False),
'Image': (basestring, True),
'PrivilegedMode': (boolean, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'LINUX_CONTAINER',
'WINDOWS_CONTAINER',
]
env_type = self.properties.get('Type')
if env_type not in valid_types:
raise ValueError('Environment Type: must be one of %s' %
','.join(valid_types))
class ProjectCache(AWSProperty):
props = {
'Location': (basestring, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'NO_CACHE',
'S3',
]
cache_type = self.properties.get('Type')
if cache_type not in valid_types:
raise ValueError('ProjectCache Type: must be one of %s' %
','.join(valid_types))
class Source(AWSProperty):
props = {
'Auth': (SourceAuth, False),
'BuildSpec': (basestring, False),
'GitCloneDepth': (positive_integer, False),
'InsecureSsl': (boolean, False),
'Location': (basestring, False),
'ReportBuildStatus': (boolean, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'BITBUCKET',
'CODECOMMIT',
'CODEPIPELINE',
'GITHUB',
'GITHUB_ENTERPRISE',
'NO_SOURCE',
'S3',
]
location_agnostic_types = [
'CODEPIPELINE',
'NO_SOURCE',
]
source_type = self.properties.get('Type')
# Don't do additional checks if source_type can't
# be determined (for example, being a Ref).
if isinstance(source_type, AWSHelperFn):
return
if source_type not in valid_types:
raise ValueError('Source Type: must be one of %s' %
','.join(valid_types))
location = self.properties.get('Location')
if source_type not in location_agnostic_types and not location:
raise ValueError(
'Source Location: must be defined when type is %s' %
source_type
)
auth = self.properties.get('Auth')
if auth is not None and source_type != 'GITHUB':
raise ValueError("SourceAuth: must only be defined when using "
"'GITHUB' Source Type.")
class VpcConfig(AWSProperty):
props = {
'SecurityGroupIds': ([basestring], True),
'Subnets': ([basestring], True),
'VpcId': (basestring, True),
}
class ProjectTriggers(AWSProperty):
props = {
'Webhook': (boolean, False),
}
def validate_status(status):
""" Validate status
:param status: The Status of CloudWatchLogs or S3Logs
:return: The provided value if valid
"""
valid_statuses = [
'ENABLED',
'DISABLED'
]
if status not in valid_statuses:
raise ValueError('Status: must be one of %s' %
','.join(valid_statuses))
return status
class CloudWatchLogs(AWSProperty):
props = {
"Status": (validate_status, True),
"GroupName": (basestring, False),
"StreamName": (basestring, False)
}
class S3Logs(AWSProperty):
props = {
"Status": (validate_status, True),
"Location": (basestring, False)
}
class LogsConfig(AWSProperty):
props = {
'CloudWatchLogs': (CloudWatchLogs, False),
'S3Logs': (S3Logs, False)
}
class Project(AWSObject):
resource_type = "AWS::CodeBuild::Project"
props = {
'Artifacts': (Artifacts, True),
'BadgeEnabled': (boolean, False),
'Cache': (ProjectCache, False),
'Description': (basestring, False),
'EncryptionKey': (basestring, False),
'Environment': (Environment, True),
"LogsConfig": (LogsConfig, False),
'Name': (basestring, True),
'SecondaryArtifacts': ([Artifacts], False),
'SecondarySources': ([Source], False),
'ServiceRole': (basestring, True),
'Source': (Source, True),
'Tags': (Tags, False),
'TimeoutInMinutes': (integer, False),
'Triggers': (ProjectTriggers, False),
'VpcConfig': (VpcConfig, False),
}
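# --- Hedged usage sketch (illustrative only; not part of the original module).
# It composes the property classes above into a minimal AWS::CodeBuild::Project;
# every literal value (title, name, role ARN, image, repository URL) is a
# placeholder.
def _example_project():
    return Project(
        "ExampleCodeBuildProject",
        Name="example-project",
        ServiceRole="arn:aws:iam::123456789012:role/ExampleCodeBuildRole",
        Artifacts=Artifacts(Type="NO_ARTIFACTS"),
        Environment=Environment(
            ComputeType="BUILD_GENERAL1_SMALL",
            Image="aws/codebuild/standard:2.0",
            Type="LINUX_CONTAINER",
        ),
        Source=Source(
            Type="GITHUB",
            Location="https://github.com/example/repo.git",
        ),
    )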
|
the-stack_0_11554 | import sys
sys.path.append('.')
from util.game import Game
from util.func import Case
from util.card import Card, CardList, CardSuit
from util.player import Player
from typing import List, Optional, Tuple
import random
class LevelUp(Game):
### Constants
PLAYERNUM: int = 4
CARDPOOL: List[Card] = [Card(i) for i in range(54)] * 2
BASESCORE: int = 80
LEVELSCORE: int = 40
def __init__(self):
self.players: List[Optional[Player]] = [None] * LevelUp.PLAYERNUM
self.discard_buffer: Optional[CardList] = None
self.curPlayerIndex: Optional[int] = None
self.rankLevel: Tuple[int, int] = (1, 1)
self.dealerIndex: Optional[int] = None
self.rankMain: int = 1
# E.g. (Spade, 0, 1) means the main suit is Spade, declared with a single spade by player 0
self.suitMain: Optional[Tuple[CardSuit, int, int]] = None
self.score: int = 0
self.state: str = 'END'
for i in range(len(self.players)):
self.players[i] = Player()
def inform(self, information):
case = Case(self.state)
if case('END'):
case = Case(information)
if case('START'):
self.state = 'DISPATCH'
return (True, self._dispatch(),
{
'suit': self.suitMain,
'rank': self.rankMain,
'level': self.rankLevel
}
)
if case('DISPATCH'):
case = Case(information)
if case('FINISH'):
self.state = 'DISCARD'
if self.dealerIndex is None:
self.dealerIndex = self.suitMain[1]
self.curPlayerIndex = self.dealerIndex
self.players[self.curPlayerIndex].cardInHand += self.discard_buffer
self.discard_buffer = CardList()
for player in self.players:
player.cardFront = CardList()
return (True, None, None)
return (False, None, None)
def _dispatch(self) -> List[int]:
newCardPool: List[Card] = random.sample(LevelUp.CARDPOOL, len(LevelUp.CARDPOOL))
dispatch = [newCardPool[0:25], newCardPool[25:50], newCardPool[50:75], newCardPool[75:100], newCardPool[100:]]
for id, player in enumerate(self.players):
player.cardInHand = CardList(dispatch[id])
self.discard_buffer = CardList(dispatch[-1])
return [[card.ID for card in cards] for cards in dispatch]
def isSuitable(self, cards: List[int], playerID: int, suit: Optional[CardSuit] = None):
#suit: 0 NT, 1 Spade, 2 Heart, 3 Club, 4 Diamond
if suit is None:
return [self.isSuitable(cards, playerID, s) for s in [
CardSuit.Joker, CardSuit.Spade, CardSuit.Heart, CardSuit.Club, CardSuit.Diamond
]]
cardnum = -1
case = Case(suit)
if case(CardSuit.Spade): cardnum = 39 + self.rankMain
elif case(CardSuit.Heart): cardnum = 26 + self.rankMain
elif case(CardSuit.Club): cardnum = 13 + self.rankMain
elif case(CardSuit.Diamond): cardnum = self.rankMain
if self.suitMain is None:
if suit == CardSuit.Joker:
return cards.count(52) == 2 or cards.count(53) == 2
else:
return cardnum in cards
elif self.suitMain[1] == playerID:
if self.suitMain[2] == 2: return False
if suit != self.suitMain[0]: return False
return cards.count(cardnum) == 2
else:
if suit == CardSuit.Joker:
if self.suitMain[0] == CardSuit.Joker:
return cards.count(53) == 2
else:
return cards.count(53) == 2 or cards.count(52) == 2
if self.suitMain[2] == 2: return False
return cards.count(cardnum) == 2
def suitRequest(self, playerID: int, suit: CardSuit):
cards = self.players[playerID].cardInHand.tolist()
if not self.isSuitable(cards, playerID, suit):
return False
for player in self.players:
player.cardFront = CardList()
cardnum = -1
case = Case(suit)
if case(CardSuit.Spade): cardnum = 39 + self.rankMain
elif case(CardSuit.Heart): cardnum = 26 + self.rankMain
elif case(CardSuit.Club): cardnum = 13 + self.rankMain
elif case(CardSuit.Diamond): cardnum = self.rankMain
if suit == CardSuit.Joker:
if cards.count(52) == 2:
self.suitMain = (CardSuit.Joker, playerID, 2)
self.players[playerID].cardFront += CardList([Card(52), Card(52)])
else:
self.suitMain = (CardSuit.Joker, playerID, 2)
self.players[playerID].cardFront += CardList([Card(53), Card(53)])
else:
if self.suitMain is None:
self.suitMain = (suit, playerID, 1)
self.players[playerID].cardFront += Card(cardnum)
else:
self.suitMain = (suit, playerID, 2)
self.players[playerID].cardFront += CardList([Card(cardnum), Card(cardnum)])
front = [player.cardFront.tolist() for player in self.players]
return [front, self.suitMain]
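# --- Hedged usage sketch (illustrative only; not part of the original module).
# A fresh game starts in the 'END' state, so inform('START') deals the cards
# and reports the current trump/level information; a player may then try to
# declare trump with suitRequest().
def _example_round():
    game = LevelUp()
    ok, hands, info = game.inform('START')  # hands: four 25-card hands plus the 8 reserved cards
    declared = game.suitRequest(0, CardSuit.Spade)  # False unless player 0 holds the trump-rank spade
    return ok, hands, info, declared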
|
the-stack_0_11555 | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
""" Read data from the magnetometer and print it out, ASAP! """
import board
import adafruit_lsm303dlh_mag
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lsm303dlh_mag.LSM303DLH_Mag(i2c)
while True:
mag_x, mag_y, mag_z = sensor.magnetic
print("{0:10.3f} {1:10.3f} {2:10.3f}".format(mag_x, mag_y, mag_z))
|
the-stack_0_11556 | '''
File: pathtracker.py
Path tracking simulation with Stanley steering control and PID speed control.
author: Atsushi Sakai (@Atsushi_twi)
Ref:
- [Stanley: The robot that won the DARPA grand challenge](http://isl.ecst.csuchico.edu/DOCS/darpa2005/DARPA%202005%20Stanley.pdf)
- [Autonomous Automobile Path Tracking](https://www.ri.cmu.edu/pub_files/2009/2/Automatic_Steering_Methods_for_Autonomous_Automobile_Path_Tracking.pdf)
'''
import numpy as np
import math
class StanleyController(object):
MIN_THROTTLE = 0.0
MAX_THROTTLE = 1.0
def __init__(self, cfg):
self.cfg = cfg
self.k = 0.5 # control gain
self.Kp = cfg.KP # speed proportional gain
self.Kd = cfg.KD # speed differential gain
self.Kta = 0.5 # accel to throttle ratio
self.maxaccel = cfg.MAX_ACCEL
self.L = 30 # [m] Wheel base of vehicle
self.x = 0.
self.y = 0.
self.camx = 105.
self.camy = 400.
self.yaw = -math.pi # Current yaw (birdseye frame)
self.v = 0.
self.throttle = 0. # current throttle setting
self.img_count = 0
def constant_speed_control(self,v_target,v_current,throttle):
"""
Proportional control for the speed.
:param v_target: (float) target speed
:param v_current: (float) current speed
:param throttle: (float) previous throttle setting
:return: (float) updated throttle setting
"""
v_correction = self.Kp * (v_target - v_current)
current_accel = v_current - self.v
accel_delta = v_correction - current_accel
if accel_delta > self.maxaccel:
accel_delta = self.maxaccel
if accel_delta < -self.maxaccel:
accel_delta = -self.maxaccel
throttle = throttle + (accel_delta * self.Kta)
if throttle < self.MIN_THROTTLE:
throttle = self.MIN_THROTTLE
if throttle > self.MAX_THROTTLE:
throttle = self.MAX_THROTTLE
return throttle
def stanley_control(self, cx, cy, cyaw, v, last_target_idx):
"""
Stanley steering control.
:param state: (State object)
:param cx: ([float])
:param cy: ([float])
:param cyaw: ([float])
:param last_target_idx: (int)
:return: (float, int)
"""
current_target_idx, error_front_axle = self.calc_target_index(cx, cy)
if last_target_idx >= current_target_idx:
current_target_idx = last_target_idx
# theta_e corrects the heading error
theta_e = cyaw[current_target_idx] - self.yaw #self.normalize_angle(cyaw[current_target_idx] - self.yaw)
# theta_d corrects the cross track error
# theta_d = np.arctan2(self.k * error_front_axle, v)
# Steering control
delta = theta_e # + theta_d
return delta, current_target_idx
def normalize_angle(self,angle):
"""
Normalize an angle to [-pi, pi].
:param angle: (float)
:return: (float) Angle in radian in [-pi, pi]
"""
while angle > np.pi:
angle -= 2.0 * np.pi
while angle < -np.pi:
angle += 2.0 * np.pi
return angle
def calc_target_index(self, cx, cy):
"""
Compute index in the trajectory list of the target.
:param state: (State object)
:param cx: [float]
:param cy: [float]
:return: (int, float)
"""
# Calc front axle position
fx = self.camx + self.L * np.cos(self.yaw)
fy = self.camy + self.L * np.sin(self.yaw)
# Search nearest point index
dx = [fx - icx for icx in cx]
dy = [fy - icy for icy in cy]
d = np.hypot(dx, dy)
target_idx = np.argmin(d)
# Project RMS error onto front axle vector
front_axle_vec = [-np.cos(self.yaw + np.pi / 2),
-np.sin(self.yaw + np.pi / 2)]
error_front_axle = np.dot([dx[target_idx], dy[target_idx]], front_axle_vec)
return target_idx, error_front_axle
def run(self,img_count,x,y,yaw,velturn,velfwd,rax,ray,ryaw,speedprofile,runstate):
if img_count > self.img_count:
self.x = x
self.y = y
self.camx = 105
self.camy = 400
self.img_count = img_count
else:
dx = (x - self.x) * 100
dy = (y - self.y) * 100
self.camy = self.camy - (np.cos(yaw)*dy - np.sin(yaw)*dx) # rotate velocity by yaw angle to the camera frame
self.camx = self.camx + (np.sin(yaw)*dy + np.cos(yaw)*dx)
self.x = x
self.y = y
print(f'reuse situation {self.camx},{self.camy}')
v = np.abs(np.hypot(velfwd, velturn))
self.yaw = -np.pi/2 #np.arctan2(velfwd, velturn) - (np.pi / 2.)
if runstate == 'running':
target_idx, _ = self.calc_target_index(rax, ray)
target_speed = speedprofile[target_idx]
delta, target_idx = self.stanley_control(rax, ray, ryaw, v, target_idx)
# yaw_correction = delta - (np.arctan2(velfwd, velturn) + np.pi)
else: # if the car is not in a running state keep it stopped
target_speed = 0.0
delta = 0.0
# yaw_correction = 0.0
steering_angle = self.yaw + delta + np.pi
throttle = self.constant_speed_control(target_speed, v, self.throttle)
print(delta,steering_angle, v, target_speed, throttle, runstate)
self.throttle = throttle # for next time around
self.v = v
return self.camx,self.camy,delta,steering_angle,throttle
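# --- Hedged usage sketch (illustrative only; not part of the original module).
# The config object is a stand-in: any object exposing the KP, KD and MAX_ACCEL
# attributes read in __init__ will do.
def _example_speed_step():
    class _Cfg:
        KP = 1.0
        KD = 0.0
        MAX_ACCEL = 0.2
    controller = StanleyController(_Cfg())
    # Proportional speed control: ask for a new throttle when the car is below
    # its target speed; the result is clipped to [MIN_THROTTLE, MAX_THROTTLE].
    return controller.constant_speed_control(v_target=2.0, v_current=1.0, throttle=0.1)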
|
the-stack_0_11557 | import os
import json
from tabulate import tabulate
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.layers import Dense, Dropout, Input, LSTM
from keras.constraints import maxnorm
from keras.callbacks import EarlyStopping, ModelCheckpoint
from stochnet.classes.TimeSeriesDataset import TimeSeriesDataset
from stochnet.classes.NeuralNetworks import StochNeuralNetwork
from stochnet.classes.TopLayers import MultivariateNormalCholeskyOutputLayer, MixtureOutputLayer
def data():
'''
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
'''
current = os.getcwd()
working_path = os.path.dirname(current)
basename = os.path.abspath(working_path)
dataset_address = os.path.join(basename, 'dataset/SIR_dataset_upgraded_2.npy')
test_dataset_address = os.path.join(basename, 'dataset/SIR_dataset_upgraded_3.npy')
data_labels = {'Timestamps': 0, 'Susceptible': 1, 'Infected': 2, 'Removed': 3}
dataset = TimeSeriesDataset(dataset_address, labels=data_labels)
test_dataset = TimeSeriesDataset(test_dataset_address, labels=data_labels)
nb_past_timesteps = 5
dataset.format_dataset_for_ML(nb_past_timesteps=nb_past_timesteps, must_be_rescaled=True, percentage_of_test_data=0)
test_dataset.format_dataset_for_ML(nb_past_timesteps=nb_past_timesteps, must_be_rescaled=True, percentage_of_test_data=0)
X_train = dataset.X_train
X_test = test_dataset.X_train
Y_train = dataset.y_train
Y_test = test_dataset.y_train
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
"""
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
"""
input_tensor = Input(shape=(5, 3))
hidden1 = LSTM({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}),
recurrent_constraint=maxnorm({{uniform(1, 3)}}))(input_tensor)
dropout1 = Dropout({{uniform(0.2, 0.7)}})(hidden1)
NN_body = Dense({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(dropout1)
dropout2 = Dropout({{uniform(0.2, 0.7)}})(NN_body)
NN_body = Dense({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(dropout2)
number_of_components = 2
components = []
for j in range(number_of_components):
components.append(MultivariateNormalCholeskyOutputLayer(3))
TopModel_obj = MixtureOutputLayer(components)
NN = StochNeuralNetwork(input_tensor, NN_body, TopModel_obj)
callbacks = []
callbacks.append(EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='min'))
result = NN.fit(X_train, Y_train,
batch_size={{choice([512, 1024, 2048, 3072, 4096])}},
epochs={{choice([10, 15, 20, 40])}},
verbose=2,
callbacks=callbacks,
validation_data=(X_test, Y_test))
parameters = space
val_loss = min(result.history['val_loss'])
parameters["val_loss"] = val_loss
print('Validation loss: {0}'.format(val_loss))
if 'results' not in globals():
global results
results = []
results.append(parameters)
print(tabulate(results, headers="keys", tablefmt="fancy_grid", floatfmt=".8f"))
with open('/home/lpalmier/workspace/output/SIR/SIR_model_tuning_MNC_01.json', 'w') as f:
f.write(json.dumps(results))
return {'loss': val_loss, 'status': STATUS_OK, 'model': NN.model}
if __name__ == '__main__':
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=20,
trials=Trials())
X_train, Y_train, X_test, Y_test = data()
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
|
the-stack_0_11559 | """Support for deCONZ binary sensors."""
from pydeconz.sensor import Presence, Vibration
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import ATTR_DARK, ATTR_ON, NEW_SENSOR
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry, DeconzEntityHandler
ATTR_ORIENTATION = "orientation"
ATTR_TILTANGLE = "tiltangle"
ATTR_VIBRATIONSTRENGTH = "vibrationstrength"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up deCONZ platforms."""
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ binary sensor."""
gateway = get_gateway_from_config_entry(hass, config_entry)
entity_handler = DeconzEntityHandler(gateway)
@callback
def async_add_sensor(sensors, new=True):
"""Add binary sensor from deCONZ."""
entities = []
for sensor in sensors:
if new and sensor.BINARY:
new_sensor = DeconzBinarySensor(sensor, gateway)
entity_handler.add_entity(new_sensor)
entities.append(new_sensor)
async_add_entities(entities, True)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_SENSOR), async_add_sensor
)
)
async_add_sensor(gateway.api.sensors.values())
class DeconzBinarySensor(DeconzDevice, BinarySensorDevice):
"""Representation of a deCONZ binary sensor."""
@callback
def async_update_callback(self, force_update=False):
"""Update the sensor's state."""
changed = set(self._device.changed_keys)
keys = {"on", "reachable", "state"}
if force_update or any(key in changed for key in keys):
self.async_schedule_update_ha_state()
@property
def is_on(self):
"""Return true if sensor is on."""
return self._device.is_tripped
@property
def device_class(self):
"""Return the class of the sensor."""
return self._device.SENSOR_CLASS
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._device.SENSOR_ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attr = {}
if self._device.on is not None:
attr[ATTR_ON] = self._device.on
if self._device.secondary_temperature is not None:
attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
if self._device.type in Presence.ZHATYPE and self._device.dark is not None:
attr[ATTR_DARK] = self._device.dark
elif self._device.type in Vibration.ZHATYPE:
attr[ATTR_ORIENTATION] = self._device.orientation
attr[ATTR_TILTANGLE] = self._device.tiltangle
attr[ATTR_VIBRATIONSTRENGTH] = self._device.vibrationstrength
return attr
|
the-stack_0_11560 | #!/usr/bin/env python
"""settings.py
Udacity conference server-side Python App Engine app user settings
$Id$
created/forked from conference.py by wesc on 2014 may 24
"""
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '1009053430959-tdqqi86iai9gdqlods5m7mpoo1it0b0q.apps.googleusercontent.com'
ANDROID_CLIENT_ID = 'replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
ANDROID_AUDIENCE = WEB_CLIENT_ID
|
the-stack_0_11561 | import subprocess
import logging
import sys
from contextlib import contextmanager
@contextmanager
def maybe_open1(out):
if isinstance(out, str):
with open(out, "ab") as f:
yield f
else:
yield out
@contextmanager
def maybe_open2(stdout, stderr):
with maybe_open1(stdout) as fout:
if isinstance(stderr, str):
if stderr == stdout:
yield fout, fout
else:
with open(stderr, "ab") as ferr:
yield fout, ferr
else:
yield fout, stderr
class Make:
def __init__(self, root_dir, args=[], stdout=None, stderr=None, verbose=False):
self._root_dir = root_dir
self._args = ["make"] + args
if not verbose:
self._args += ["-s", "--no-print-directory"]
self._proc_stdout = stdout
self._proc_stderr = stderr
def check_call(self, args):
args = self._args + args
logging.debug(f"Execute {args} in {self._root_dir}, stdout={self._proc_stdout}, stderr={self._proc_stderr}")
with maybe_open2(self._proc_stdout, self._proc_stderr) as (stdout, stderr):
subprocess.check_call(args,
cwd=self._root_dir,
stdout=stdout,
stderr=stderr
)
def check_output(self, args):
args = self._args + args
logging.debug(f"Execute {args} in {self._root_dir} ...")
with maybe_open1(self._proc_stderr) as stderr:
output = subprocess.check_output(args,
cwd=self._root_dir,
stderr=stderr
)
logging.debug(f"Output of {args} command: {output}")
return output
def get_output_lines(self, args):
out = self.check_output(args)
return [l.strip() for l in out.decode("utf-8").split("\n")]
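# --- Hedged usage sketch (illustrative only; not part of the original module).
# The project path, log file and make targets are placeholders. stdout/stderr
# may be file objects or path strings (the maybe_open helpers above accept
# either, and reuse one handle when both name the same file).
def _example_build():
    make = Make("/path/to/project", stdout="build.log", stderr="build.log")
    make.check_call(["all"])                      # runs `make -s --no-print-directory all`
    return make.get_output_lines(["print-vars"])  # capture a target's output as stripped lines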
|
the-stack_0_11562 | import numpy as np
def calc_jacobian(frames: list, transformations: dict, jsize: int) -> np.array:
"""
Args:
frames (list): frames to compute jacobian
transformations (dict): transformations from forward kinematics
thetas (int): size of joint space
Returns:
Jacobian (np.array(6, jsize)): return Jacobian
"""
target_position = list(transformations.values())[-1].pos
J = np.zeros((6, jsize))
n = 0
for frame in frames:
if frame.joint.dtype == "revolute":
n += 1
w = np.dot(transformations[frame.link.name].h_mat[:3, :3], frame.joint.axis)
v = np.cross(w, target_position - transformations[frame.link.name].pos)
J[:, n - 1] = np.hstack((v, w))
elif frame.joint.dtype == "prismatic":
n += 1
w = np.zeros(3)
v = np.dot(transformations[frame.link.name].h_mat[:3, :3], frame.joint.axis)
J[:, n - 1] = np.hstack((v, w))
return J
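# --- Hedged note (added for illustration; not part of the original module).
# For the i-th revolute joint the loop above builds the classic geometric
# Jacobian column J[:, i] = [ w x (p_target - p_i), w ], where w is the joint
# axis rotated into the base frame by the link's rotation matrix and p_target
# is the position of the last transformation; for a prismatic joint the
# angular part is zero and the linear part is the rotated axis itself.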
|
the-stack_0_11564 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from PIL import Image
import cv2
import numpy as np
from ..adapters import Adapter
from ..representation import ImageProcessingPrediction, SuperResolutionPrediction, ContainerPrediction
from ..config import ConfigValidator, BoolField, StringField, DictField, NormalizationArgsField
from ..preprocessor import Normalize
class ImageProcessingAdapter(Adapter):
__provider__ = 'image_processing'
prediction_types = (ImageProcessingPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'reverse_channels': BoolField(
optional=True, default=False, description="Allow switching output image channels e.g. RGB to BGR"
),
'mean': NormalizationArgsField(
optional=True, default=0,
description='The value which should be added to prediction pixels for scaling to range [0, 255]'
'(usually it is the same mean value which subtracted in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_MEANS
),
'std': NormalizationArgsField(
optional=True, default=255,
description='The value on which prediction pixels should be multiplied for scaling to range '
'[0, 255] (usually it is the same scale (std) used in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_STDS,
allow_zeros=False
),
'target_out': StringField(optional=True, description='Target super resolution model output'),
"cast_to_uint8": BoolField(
optional=True, default=True, description="Cast prediction values to integer within [0, 255] range"
)
})
return parameters
@classmethod
def validate_config(cls, config, fetch_only=False, **kwargs):
return super().validate_config(
config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT
)
def configure(self):
self.reverse_channels = self.get_value_from_config('reverse_channels')
self.mean = self.get_value_from_config('mean')
self.std = self.get_value_from_config('std')
self.target_out = self.get_value_from_config('target_out')
self.cast_to_uint8 = self.get_value_from_config('cast_to_uint8')
self.output_verified = False
def select_output_blob(self, outputs):
self.output_verified = True
if not self.target_out:
super().select_output_blob(outputs)
self.target_out = self.output_blob
return
self.target_out = self.check_output_name(self.target_out, outputs)
return
def process(self, raw, identifiers, frame_meta):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.output_verified:
self.select_output_blob(raw_outputs)
for identifier, out_img in zip(identifiers, raw_outputs[self.target_out]):
out_img = self._basic_postprocess(out_img)
result.append(ImageProcessingPrediction(identifier, out_img))
return result
def _basic_postprocess(self, img):
img = img.transpose((1, 2, 0)) if img.shape[-1] > 4 else img
img *= self.std
img += self.mean
if self.cast_to_uint8:
img = np.clip(img, 0., 255.)
img = img.astype(np.uint8)
if self.reverse_channels:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img, 'RGB') if Image is not None else img
img = np.array(img).astype(np.uint8)
return img
class SuperResolutionAdapter(ImageProcessingAdapter):
__provider__ = 'super_resolution'
prediction_types = (SuperResolutionPrediction, )
def process(self, raw, identifiers=None, frame_meta=None):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.output_verified:
self.select_output_blob(raw_outputs)
for identifier, img_sr in zip(identifiers, raw_outputs[self.target_out]):
img_sr = self._basic_postprocess(img_sr)
result.append(SuperResolutionPrediction(identifier, img_sr))
return result
class MultiSuperResolutionAdapter(Adapter):
__provider__ = 'multi_super_resolution'
prediction_types = (SuperResolutionPrediction, )
@property
def additional_output_mapping(self):
return getattr(self, '_additional_output_mapping', None)
@additional_output_mapping.setter
def additional_output_mapping(self, value):
self._additional_output_mapping = value
if hasattr(self, '_per_target_adapters'):
for adapter in self._per_target_adapters.values():
adapter.additional_output_mapping = value
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'reverse_channels': BoolField(
optional=True, default=False, description="Allow switching output image channels e.g. RGB to BGR"
),
'mean': NormalizationArgsField(
optional=True, default=0,
description='The value which should be added to prediction pixels for scaling to range [0, 255]'
'(usually it is the same mean value which subtracted in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_MEANS
),
'std': NormalizationArgsField(
optional=True, default=255,
description='The value on which prediction pixels should be multiplied for scaling to range '
'[0, 255] (usually it is the same scale (std) used in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_STDS,
allow_zeros=False
),
"cast_to_uint8": BoolField(
optional=True, default=True, description="Cast prediction values to integer within [0, 255] range"
),
'target_mapping': DictField(allow_empty=False, key_type=str, value_type=str)
})
return parameters
@classmethod
def validate_config(cls, config, fetch_only=False, **kwargs):
return super().validate_config(
config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT
)
def configure(self):
self.target_mapping = self.get_value_from_config('target_mapping')
common_adapter_config = deepcopy(self.launcher_config)
self._per_target_adapters = {}
for key, output_name in self.target_mapping.items():
adapter_config = deepcopy(common_adapter_config)
adapter_config['target_out'] = output_name
self._per_target_adapters[key] = SuperResolutionAdapter(
adapter_config,
additional_output_mapping=self.additional_output_mapping
)
def process(self, raw, identifiers=None, frame_meta=None):
predictions = [{}] * len(identifiers)
for key, adapter in self._per_target_adapters.items():
result = adapter.process(raw, identifiers, frame_meta)
for batch_id, output_res in enumerate(result):
predictions[batch_id][key] = output_res
results = [ContainerPrediction(prediction_mapping) for prediction_mapping in predictions]
return results
class SuperResolutionYUV(Adapter):
__provider__ = 'super_resolution_yuv'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'y_output': StringField(),
'u_output': StringField(),
'v_output': StringField(),
'target_color': StringField(optional=True, choices=['bgr', 'rgb'], default='bgr')
})
return parameters
def configure(self):
self.y_output = self.get_value_from_config('y_output')
self.u_output = self.get_value_from_config('u_output')
self.v_output = self.get_value_from_config('v_output')
self.color = cv2.COLOR_YUV2BGR if self.get_value_from_config('target_color') == 'bgr' else cv2.COLOR_YUV2RGB
def get_image(self, y, u, v):
is_hwc = u.shape[-1] == 1
if not is_hwc:
y = np.transpose(y, (1, 2, 0))
u = np.transpose(u, (1, 2, 0))
v = np.transpose(v, (1, 2, 0))
h, w, __ = u.shape
u = u.reshape(h, w, 1)
v = v.reshape(h, w, 1)
u = cv2.resize(u, None, fx=2, fy=2)
v = cv2.resize(v, None, fx=2, fy=2)
y = y.reshape(2 * h, 2 * w, 1)
u = u.reshape(2 * h, 2 * w, 1)
v = v.reshape(2 * h, 2 * w, 1)
yuv = np.concatenate([y, u, v], axis=2)
image = cv2.cvtColor(yuv, self.color)
return image
def process(self, raw, identifiers=None, frame_meta=None):
outs = self._extract_predictions(raw, frame_meta)
results = []
for identifier, yres, ures, vres in zip(
identifiers, outs[self.y_output], outs[self.u_output], outs[self.v_output]
):
sr_img = self.get_image(yres, ures, vres)
results.append(SuperResolutionPrediction(identifier, sr_img))
return results
class TrimapAdapter(ImageProcessingAdapter):
__provider__ = 'trimap'
prediction_types = (ImageProcessingPrediction, )
def process(self, raw, identifiers, frame_meta):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.output_verified:
self.select_output_blob(raw_outputs)
for identifier, out_img, out_meta in zip(identifiers, raw_outputs[self.target_out], frame_meta):
tmap = np.expand_dims(out_meta['tmap'], axis=0)
C, _, W = out_img.shape
if C > 1 and W == 1:
out_img = np.transpose(out_img, [2, 0, 1])
out_img[tmap == 2] = 1
out_img[tmap == 0] = 0
out_img = self._basic_postprocess(out_img)
result.append(ImageProcessingPrediction(identifier, out_img))
return result
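# --- Hedged note (illustrative only; not part of the original module).
# The adapters above all funnel raw network output through the same rescaling
# in _basic_postprocess: prediction * std + mean, clipped to [0, 255] and cast
# to uint8 (with the defaults std=255, mean=0). A standalone sketch of that
# arithmetic:
def _example_rescale(prediction):
    return np.clip(prediction * 255 + 0, 0., 255.).astype(np.uint8)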
|
the-stack_0_11566 | from DataUploader.PgsqlDataUploader import PgsqlDataUploader
def clean():
input_file = 'csvfiles.json'
uploader = PgsqlDataUploader(input_file)
uploader.run_script("sql/AdventureWorks_postgres_drop.sql")
uploader.clean_up()
def prepare():
input_file = 'csvfiles.json'
uploader = PgsqlDataUploader(input_file)
uploader.run_script("sql/AdventureWorks_postgres_create_NoRels.sql")
uploader.clean_up()
def upload():
input_file = 'csvfiles.json'
uploader = PgsqlDataUploader(input_file)
uploader.upload_from_csv()
uploader.clean_up()
def execute(i):
switcher = {0: clean, 1: prepare, 2: upload}
func = switcher.get(i, lambda: 'Invalid')
return func()
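# --- Hedged note (illustrative only; not part of the original script):
# execute(0) runs clean(), execute(1) runs prepare(), execute(2) runs upload(),
# and any other value falls through to the lambda and returns 'Invalid'.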
if __name__ == '__main__':
"""
Little application to perform some basic operations on PostgreSQL databases
- Clean, drop, and upload data from scripts
- Data in CSV format
- Makes use of psycopg2
Author: Andres Osorio
Date: 27/06/2021
Company: Phystech SAS
Client: DS4A Course
"""
execute(2)
print("All done")
|
the-stack_0_11567 |
import sys
from rlpyt.utils.launching.affinity import affinity_from_code
from rlpyt.samplers.parallel.gpu.sampler import GpuSampler
from rlpyt.samplers.parallel.gpu.collectors import GpuWaitResetCollector
from rlpyt.envs.atari.atari_env import AtariEnv, AtariTrajInfo
from rlpyt.algos.pg.a2c import A2C
from rlpyt.agents.pg.atari import AtariLstmAgent
from rlpyt.runners.minibatch_rl import MinibatchRl
from rlpyt.utils.logging.context import logger_context
from rlpyt.utils.launching.variant import load_variant, update_config
from rlpyt.experiments.configs.atari.pg.atari_lstm_a2c import configs
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
affinity = affinity_from_code(slot_affinity_code)
config = configs[config_key]
variant = load_variant(log_dir)
config = update_config(config, variant)
sampler = GpuSampler(
EnvCls=AtariEnv,
env_kwargs=config["env"],
CollectorCls=GpuWaitResetCollector,
TrajInfoCls=AtariTrajInfo,
**config["sampler"]
)
algo = A2C(optim_kwargs=config["optim"], **config["algo"])
agent = AtariLstmAgent(model_kwargs=config["model"], **config["agent"])
runner = MinibatchRl(
algo=algo,
agent=agent,
sampler=sampler,
affinity=affinity,
**config["runner"]
)
name = config["env"]["game"]
with logger_context(log_dir, run_ID, name, config):
runner.train()
if __name__ == "__main__":
build_and_train(*sys.argv[1:])
|
the-stack_0_11569 | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
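# --- Hedged example (illustrative only; not part of the original file).
# A protocol line whose second field is the trial name and whose last field is
# the label, e.g. "LA_0069 LA_E_1234567 - A07 spoof", would give
# data_buffer["LA_E_1234567"] == 0, while a line ending in "bonafide" gives 1.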
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
self.m_resampler = torchaudio.transforms.Resample(
prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
|
the-stack_0_11571 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^$", views.PostListView.as_view(), name="post_list"),
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r"^post/(?P<pk>\d+)$", views.PostDetailView.as_view(), name="post_detail"),
url(r"^post/new/$", views.CreatePostView.as_view(), name="post_new"),
url(r"^post/(?P<pk>\d+)/edit/$",
views.PostUpdateView.as_view(), name="post_edit"),
url(r"^post/(?P<pk>\d+)/publish/$",
views.post_publish, name="post_publish"),
url(r'^post/(?P<pk>\d+)/remove/$',
views.PostDeleteView.as_view(), name='post_remove'),
url(r'^drafts/$', views.DraftListView.as_view(), name='post_draft_list'),
url(r'^post/(?P<pk>\d+)/comment/$',
views.add_comment_to_post, name='add_comment_to_post'),
url(r'^comment/(?P<pk>\d+)/approve/$',
views.comment_approve, name='comment_approve'),
url(r'^comment/(?P<pk>\d+)/remove/$',
views.comment_remove, name='comment_remove'),
]
|
the-stack_0_11572 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "sana-khan-34437.botics.co"
site_params = {
"name": "Sana Khan",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
the-stack_0_11573 | # Import Flask
from flask import Flask, jsonify
# Dependencies and Setup
import numpy as np
import datetime as dt
# Python SQL Toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.pool import StaticPool
import dateutil.parser as dparser
# Database Setup by creating engine to the db path
engine = create_engine("sqlite:///Resources/hawaii.sqlite", echo=False)
# Reflect hawaii database into Base
Base = automap_base()
# Reflect all the tables in hawaii db
Base.prepare(engine, reflect=True)
# Create instances each Table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Setting-up Flask
# Initialize Flask app
app = Flask(__name__)
# set-up all the routes
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (thread) from Python to the DB
session = Session(engine)
date = session.query(Measurement.date).order_by(Measurement.date.desc())[0][0]
latest_date = dt.datetime.strptime(date, "%Y-%m-%d").date()
latest_12 = latest_date - dt.timedelta(days=365)
precipitation_data = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).filter(Measurement.date >= latest_12).all()
session.close()
return dict(precipitation_data)
@app.route("/api/v1.0/stations")
def stations():
# Create our session (thread) from Python to the DB
session = Session(engine)
stations = session.query(Station.id, Station.station).distinct().all()
session.close()
results = []
for row in stations:
station = {}
station["id"] = row[0]
station["station"] = row[1]
results.append(station)
return jsonify(results)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (thread) from Python to the DB
session = Session(engine)
    # Rank stations by their observation counts so the first row is the most active one
    active_stations = session.query(Measurement.station, func.count(Measurement.id)).\
        group_by(Measurement.station).\
        order_by(func.count(Measurement.id).desc()).all()
    most_active_station = active_stations[0][0]
    recent_date = session.query(Measurement.date).\
        filter(Measurement.station == most_active_station).\
        order_by(Measurement.date.desc())[0][0]
    recent_date = dt.datetime.strptime(recent_date, "%Y-%m-%d").date()
    recent_year = recent_date - dt.timedelta(days=365)
    recent_year_temp = session.query(Measurement.date, Measurement.tobs).\
        filter(Measurement.station == most_active_station).\
        filter(Measurement.date >= recent_year).\
        order_by(Measurement.date.desc()).all()
session.close()
return dict(recent_year_temp)
@app.route("/api/v1.0/<start_date>")
def start_date(start_date):
session = Session(engine)
result = session.query(Measurement.date, func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.tobs).filter(Measurement.date >= start_date).first()
session.close()
aggre = {}
aggre["Date"]= result[0]
aggre["Min"] = result[1]
aggre["Max"] = result[2]
aggre["Average"] = result[3]
return aggre
@app.route("/api/v1.0/<start_date>/<end_date>")
def range_date(start_date, end_date):
session = Session(engine)
    start_date = dt.datetime.strptime(start_date, "%Y-%m-%d").date()
    end_date = dt.datetime.strptime(end_date, "%Y-%m-%d").date()
    result = session.query(Measurement.date, func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date)[0]
session.close()
aggre = {}
aggre["Date"]= result[0]
aggre["Min"] = result[1]
aggre["Max"] = result[2]
aggre["Average"] = result[3]
return aggre
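# Hedged usage sketch (illustrative): with the app running on the default local
# Flask port, the two aggregate routes above can be exercised as, for example,
#   GET http://127.0.0.1:5000/api/v1.0/2016-08-23
#   GET http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23
# and each responds with a dict carrying the Date/Min/Max/Average keys built above.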
@app.route("/api/v1.0/questions")
def questions():
return """<html>
<center>
<img src="/static/silly.png", alt="There you go!!!", width="700",height="680" />
</center>
</html>"""
# set-up Home routes
@app.route("/")
def welcomepage():
return """<html>
<h1>Welcome to Hawaii Climate Analysis!!!</h1>
<aside> By Shilpa...</aside>
<a href = "http://climate.geography.hawaii.edu/interactivemap.html" target = "_blank" ><img src="/static/hawaii_climate.png",width="718" height="135", alt="Hawaii Climate Analysis"/></a>
<section><h2><b>Analysis</b><img src="/static/Hawaii_surfing2.png",width="300",height="115", style="float:right", alt="Surf's up!!!"></h2>
<p><i>Below are the analysis performed on the Hawaii Climate data: </i></p>
<article>
        <dt><li><b>Precipitation Data</b></li></dt>
        <dd><a href="/api/v1.0/precipitation" target = "_blank">Precipitation (last 12 months)</a></dd>
        <dd> <mark>Returns 'Date' & 'Precipitation' for the last 12-month period</mark></dd>
</article>
<dl><dt><li><b>Stations Data</b></li></dt>
<dd><a href="/api/v1.0/stations" target = "_blank">Most active Stations</a></dd>
<dd><mark>Returns List of Station 'id's' & 'station names' in Hawaii </mark></dd>
</dl>
        <dl><dt><li><b>Temperature Observations (TOBS)</b></li></dt>
        <dd><a href="/api/v1.0/tobs" target = "_blank">Temperature observations for the last 12 months</a></dd>
        <dd><mark>Returns 'Date' & 'Temperature' of the most active station over the last 12-month period</mark></dd>
</dl>
<dl><dt><li><b>MIN, MAX & AVERAGE Temperatures</b></li></dt>
<dd><a href="/api/v1.0/2016-8-23" target = "_blank">Temperature Aggregations starting 2016-8-23</a></dd>
<dd><a href="/api/v1.0/2017-6-23/2017-8-15" target = "_blank">Temperature Aggregations from 2016-8-23 to 2017-1-15</a></dd>
<dd><mark>Returns 'Min', 'Max' & 'Average' for the given date or range of dates</mark></dd>
</dl>
</section>
<section><h2><b>Question & Concerns</b></h2>
<dl>
<dd><h3><a href="/api/v1.0/questions" target = "_blank">Have Questions?? Contact-us here</a></h3></dd>
</dl>
</section>
</html>"""
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_11575 | import math
def get_odd(n):
    """Return the largest odd divisor of n (i.e. strip out every factor of 2)."""
    while True:
        if n % 2:
            return n
        n //= 2
def solve():
n=int(input())
c='Ashishgup'
o='FastestFinger'
while True:
if n<=1:
print(o)
break
if (n%2) or n==2:
print(c)
break
if not n&n-1:
print(o)
break
n//=get_odd(n)
c,o=o,c
if __name__ == '__main__':
t=int(input())
for _ in range(t):
solve()
|
the-stack_0_11576 | # -*- coding=utf-8 -*-
import os
from setuptools import setup, find_packages
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
LONGDOC = '''
jionlp
================================================================================
Provides algorithm API interfaces for the company's algorithm and consuming teams.
Installation:
The code requires Python 3.
- Semi-automatic install:
$ git clone http://git.bbdops.com/BBD-AI-Lab/BBD-Tools-Documentation.git
$ cd BBD-Tools-Documentation
$ pip install .
- Reference it via `import bbd_tools as bbd`
'''
__name__ = 'jionlp'
__author__ = "cuiguoer"
__copyright__ = "Copyright 2020, dongrixinyu"
__credits__ = []
__license__ = "Apache License 2.0"
__maintainer__ = "dongrixinyu"
__email__ = "[email protected]"
__url__ = 'https://github.com/dongrixinyu/jionlp'
__description__ = 'Simple, Keras-powered multilingual NLP framework,' \
' allows you to build your models in 5 minutes for named entity recognition (NER),' \
' part-of-speech tagging (PoS) and text classification tasks. ' \
'Includes BERT, GPT-2 and word2vec embedding.'
with open(os.path.join(DIR_PATH, 'requirements.txt'),
'r', encoding='utf-8') as f:
requirements = f.readlines()
setup(name=__name__,
version='0.1.0',
url=__url__,
author=__author__,
author_email=__email__,
description=__description__,
long_description=LONGDOC,
license=__license__,
py_modules=[],
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
entry_points={
'console_scripts': [
# 'scheduler_start = algorithm_platform.scheduler.server: start',
]
},
test_suite='nose.collector',
tests_require=['nose'])
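# Hedged usage sketch (assumed workflow, not stated in the original file):
#
#     pip install .                      # install straight from the source tree
#     python setup.py sdist bdist_wheel  # or build distributable archives
#     python setup.py test               # run the nose test suite declared above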
|
the-stack_0_11577 | """
This file offers the methods to automatically retrieve the graph Rhizobium leguminosarum viciae 3841.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def RhizobiumLeguminosarumViciae3841(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Rhizobium leguminosarum viciae 3841 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Rhizobium leguminosarum viciae 3841 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="RhizobiumLeguminosarumViciae3841",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
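# Hedged usage sketch (illustrative; the exact import path is an assumption that
# depends on how this auto-generated module is packaged, e.g. inside ensmallen):
#
#     from ensmallen.datasets.string import RhizobiumLeguminosarumViciae3841
#     graph = RhizobiumLeguminosarumViciae3841(version="links.v11.5")
#     print(graph)  # textual report of the retrieved graph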
|
the-stack_0_11578 | # -*- coding: utf-8 -*-
__author__ = 'ooo'
__date__ = '2019/1/15 12:17'
import math, torch
import torch.nn as nn
import torch.nn.functional as F
class ViewLayer(nn.Module):
def __init__(self, dim=-1):
super(ViewLayer, self).__init__()
self.dim = dim
def forward(self, x):
# print('view-layer -> ', x.size())
x = x.view(x.size(0), self.dim)
return x
class AdaAvgPool(nn.Module):
def __init__(self, size=0):
self.size = size
super(AdaAvgPool, self).__init__()
def forward(self, x):
# print('avg-layer -> ', x.size())
if self.size == -1:
return x
if self.size == 0:
h, w = x.size(2), x.size(3)
assert h == w
elif self.size >= 1:
h, w = self.size, self.size
else:
raise NotImplementedError('check the avg kernel size !')
return F.avg_pool2d(x, kernel_size=(h, w))
class Activate(nn.Module):
def __init__(self, method='relu'):
super(Activate, self).__init__()
if method == 'relu':
self.method = nn.ReLU(inplace=True)
elif method == 'sigmoid':
self.method = nn.Sigmoid()
elif method == 'leaky_relu':
self.method = nn.LeakyReLU(negative_slope=0.02)
else:
raise NotImplementedError('--->%s' % method)
def forward(self, x):
return self.method(x)
class SweetBlock(nn.Module):
def __init__(self, depth, inter=1, downexp=2, downsize=False):
super(SweetBlock, self).__init__()
self.downsize = downsize
self.bn1 = nn.BatchNorm2d(depth)
self.conv1 = nn.Conv2d(depth, depth * inter, 3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(depth * inter)
self.deconv2 = nn.ConvTranspose2d(depth * inter, depth, 3, stride=2, padding=1, output_padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
if downsize:
self.down1 = nn.Sequential(
nn.BatchNorm2d(depth),
nn.ReLU(inplace=True),
nn.Conv2d(depth, depth * downexp, 3, stride=1, padding=1, bias=False),
nn.AvgPool2d(2)
)
self.down2 = nn.Sequential(
nn.BatchNorm2d(depth),
nn.ReLU(inplace=True),
nn.Conv2d(depth, depth * downexp, 3, stride=1, padding=1, bias=False),
nn.AvgPool2d(2),
# nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
)
def forward(self, x):
if isinstance(x, (list, tuple)):
assert len(x) == 3, 'len of x is: %s ...' % len(x)
x1, x2, pred = x # (big, small, pred)
else:
x1, x2, pred = x, None, None
res1 = self.conv1(self.relu(self.bn1(x1)))
res2 = self.deconv2(self.relu(self.bn2(res1)))
res1 = res1 + x2
res2 = res2 + x1
if self.downsize:
res2 = self.down2(res2)
res1 = self.down1(res1)
# utils.print_size([res2, res1])
return res2, res1, pred
class TransBlock(nn.Module):
def __init__(self, indepth, outdepth):
super(TransBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(indepth)
self.conv1 = nn.Conv2d(indepth, outdepth, 3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(indepth)
self.conv2 = nn.Conv2d(indepth, outdepth, 3, stride=1, padding=1, bias=False)
def forward(self, x):
if isinstance(x, (list, tuple)):
x1, x2, pred = x
else:
x1, x2, pred = x, None, None
x1 = self.conv1(F.relu(self.bn1(x1)))
x1 = F.avg_pool2d(x1, 2)
x2 = self.conv2(F.relu(self.bn2(x2)))
x2 = F.avg_pool2d(x2, 2)
return x1, x2
class SumaryBlock(nn.Module):
def __init__(self, depth, classify=1, avgpool=True, active='relu', nclass=1000):
super(SumaryBlock, self).__init__()
self.classify = classify
if self.classify >= 1:
self.classifier1 = nn.Sequential(
nn.BatchNorm2d(depth),
Activate(active),
AdaAvgPool(),
ViewLayer(),
nn.Linear(depth, nclass)
)
if self.classify >= 2:
self.classifier2 = nn.Sequential(
nn.BatchNorm2d(depth),
Activate(active),
AdaAvgPool(),
ViewLayer(),
nn.Linear(depth, nclass)
)
def forward(self, x):
if isinstance(x, (list, tuple)):
x1, x2, pred = x
else:
x1, x2, pred = x, None, None
if self.classify == 1:
x1 = self.classifier1(x1)
pred.extend([x1])
elif self.classify == 2:
x1 = self.classifier1(x1)
x2 = self.classifier2(x2)
pred.extend([x2, x1])
else:
raise NotImplementedError
return pred
class RockBlock(nn.Module):
def __init__(self, outdepth, branch=2, dataset='cifar'):
super(RockBlock, self).__init__()
self.branch = branch
if dataset == 'cifar':
self.branch1 = nn.Sequential(
nn.Conv2d(3, outdepth, kernel_size=3, stride=1, padding=1, bias=False),
# nn.BatchNorm2d(depth),
# nn.ReLU(inplace=True),
)
if branch >= 2:
self.branch2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
elif dataset == 'imagenet':
self.branch1 = nn.Sequential(
nn.Conv2d(3, outdepth, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(outdepth),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.branch2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
pred = []
if self.branch == 1:
x = self.branch1(x)
return x, None, pred
elif self.branch == 2:
x = self.branch1(x)
x2 = self.branch2(x)
return x, x2, pred
else:
            raise ValueError('branch must be in [1, 2]!')
class SweetNet(nn.Module):
def __init__(self, branch=2, depth=64, layers=(2, 3, 3, 3), expand=(1, 2, 4, 8), downexp=2, downlast=False,
inter=(1, 1, 1, 1), classify=1, active='relu', nclass=1000):
super(SweetNet, self).__init__()
self.layers = layers
self.layer0 = RockBlock(depth, branch, dataset='imagenet')
self.layer1 = self._make_sweet_layer(SweetBlock, layers[0], depth * expand[0], inter[0], downexp, down=True)
self.layer2 = self._make_sweet_layer(SweetBlock, layers[1], depth * expand[1], inter[1], downexp, down=True)
self.layer3 = self._make_sweet_layer(SweetBlock, layers[2], depth * expand[2], inter[2], downexp, down=True)
self.layer4 = self._make_sweet_layer(SweetBlock, layers[3], depth * expand[3], inter[3], downexp, down=downlast)
if downlast:
indepth = depth * expand[3] * downexp
else:
indepth = depth * expand[3]
self.classifier = SumaryBlock(indepth, classify, avgpool=True, active=active, nclass=nclass)
def _make_sweet_layer(self, block, nums, depth, inter=1, downexp=2, down=True):
layers = []
for i in range(nums - 1):
layers.append(block(depth, inter, downexp, downsize=False))
layers.append(block(depth, inter, downexp, downsize=down))
return nn.Sequential(*layers)
def _make_trans_layer(self, block, indepth, outdepth):
return block(indepth, outdepth)
def forward(self, x):
x = self.layer0(x)
# utils.print_size(x)
x = self.layer1(x)
# utils.print_size(x)
x = self.layer2(x)
# utils.print_size(x)
x = self.layer3(x)
# utils.print_size(x)
x = self.layer4(x)
# utils.print_size(x)
x = self.classifier(x)
return x
class CifarSweetNet(nn.Module):
def __init__(self, branch=2, depth=16, layers=(2, 3, 3), expand=(1, 2, 4), downexp=2, downlast=False,
inter=(1, 1, 1), classify=1, active='relu', nclass=10):
super(CifarSweetNet, self).__init__()
self.layers = layers
self.layer0 = RockBlock(depth, branch, dataset='cifar')
self.layer1 = self._make_sweet_layer(SweetBlock, layers[0], depth * expand[0], inter[0], downexp, down=True)
self.layer2 = self._make_sweet_layer(SweetBlock, layers[1], depth * expand[1], inter[1], downexp, down=True)
self.layer3 = self._make_sweet_layer(SweetBlock, layers[2], depth * expand[2], inter[2], downexp, down=downlast)
if downlast:
indepth = depth * expand[2] * downexp
else:
indepth = depth * expand[2]
self.classifier = SumaryBlock(indepth, classify, avgpool=True, active=active, nclass=nclass)
def _make_sweet_layer(self, block, nums, depth, inter=1, downexp=2, down=True):
layers = []
for i in range(nums - 1):
layers.append(block(depth, inter, downexp, downsize=False))
layers.append(block(depth, inter, downexp, downsize=down))
return nn.Sequential(*layers)
def _make_trans_layer(self, block, indepth, outdepth):
return block(indepth, outdepth)
def forward(self, x):
x = self.layer0(x)
# utils.print_size(x)
x = self.layer1(x)
# utils.print_size(x)
x = self.layer2(x)
# utils.print_size(x)
x = self.layer3(x)
# utils.print_size(x)
x = self.classifier(x)
return x
if __name__ == '__main__':
import xtils
torch.manual_seed(9528)
criterion = nn.CrossEntropyLoss()
# model = SweetNet(branch=2, depth=64, layers=(2, 5, 3, 2), expand=(1, 2, 4, 8), downexp=2, downlast=True,
# inter=(1, 1, 1, 1), classify=2, active='relu', nclass=1000)
# print('\n', model, '\n')
# x = torch.randn(4, 3, 256, 256)
# # utils.tensorboard_add_model(model, x)
# utils.calculate_params_scale(model, format='million')
# utils.calculate_layers_num(model, layers=('conv2d', 'deconv2d', 'linear'))
# y = model(x)
# print(sum(model.layers), len(y), ':', [(yy.shape, yy.max(1)) for yy in y if yy is not None])
arch_kwargs = {}
model = CifarSweetNet(branch=2, depth=16, layers=(2, 2, 2), expand=(1, 2, 4), downexp=2, downlast=False,
inter=(1, 1, 1), classify=1, active='relu', nclass=10)
print('\n', model, '\n')
x = torch.randn(4, 3, 32, 32)
# utils.tensorboard_add_model(model, x)
xtils.calculate_params_scale(model, format='million')
xtils.calculate_layers_num(model, layers=('conv2d', 'deconv2d', 'linear'))
y = model(x)
# loss = [criterion(o, torch.randint(0, 10, o.size()).long()) for o in y]
# optimizer = torch.optim.Adam(params=model.parameters(), lr=0.1)
# optimizer.zero_grad()
# sum(loss).backward()
# optimizer.step()
print(sum(model.layers), len(y), ':', [(yy.shape, yy.max(1)) for yy in y if yy is not None])
|
the-stack_0_11579 | import time
import sqlalchemy_1_3 as tsa
from sqlalchemy_1_3 import create_engine
from sqlalchemy_1_3 import event
from sqlalchemy_1_3 import exc
from sqlalchemy_1_3 import Integer
from sqlalchemy_1_3 import MetaData
from sqlalchemy_1_3 import pool
from sqlalchemy_1_3 import select
from sqlalchemy_1_3 import String
from sqlalchemy_1_3 import testing
from sqlalchemy_1_3 import util
from sqlalchemy_1_3.engine import url
from sqlalchemy_1_3.testing import assert_raises
from sqlalchemy_1_3.testing import assert_raises_message
from sqlalchemy_1_3.testing import assert_raises_message_context_ok
from sqlalchemy_1_3.testing import engines
from sqlalchemy_1_3.testing import eq_
from sqlalchemy_1_3.testing import expect_warnings
from sqlalchemy_1_3.testing import fixtures
from sqlalchemy_1_3.testing import is_false
from sqlalchemy_1_3.testing import is_true
from sqlalchemy_1_3.testing import mock
from sqlalchemy_1_3.testing import ne_
from sqlalchemy_1_3.testing.engines import testing_engine
from sqlalchemy_1_3.testing.mock import call
from sqlalchemy_1_3.testing.mock import Mock
from sqlalchemy_1_3.testing.mock import patch
from sqlalchemy_1_3.testing.schema import Column
from sqlalchemy_1_3.testing.schema import Table
from sqlalchemy_1_3.testing.util import gc_collect
class MockError(Exception):
pass
class MockDisconnect(MockError):
pass
class MockExitIsh(BaseException):
pass
def mock_connection():
def mock_cursor():
def execute(*args, **kwargs):
if conn.explode == "execute":
raise MockDisconnect("Lost the DB connection on execute")
elif conn.explode == "interrupt":
conn.explode = "explode_no_disconnect"
raise MockExitIsh("Keyboard / greenlet / etc interruption")
elif conn.explode == "interrupt_dont_break":
conn.explode = None
raise MockExitIsh("Keyboard / greenlet / etc interruption")
elif conn.explode in (
"execute_no_disconnect",
"explode_no_disconnect",
):
raise MockError(
"something broke on execute but we didn't lose the "
"connection"
)
elif conn.explode in (
"rollback",
"rollback_no_disconnect",
"explode_no_disconnect",
):
raise MockError(
"something broke on execute but we didn't lose the "
"connection"
)
elif args and "SELECT" in args[0]:
cursor.description = [("foo", None, None, None, None, None)]
else:
return
def close():
cursor.fetchall = cursor.fetchone = Mock(
side_effect=MockError("cursor closed")
)
cursor = Mock(
execute=Mock(side_effect=execute), close=Mock(side_effect=close)
)
return cursor
def cursor():
while True:
yield mock_cursor()
def rollback():
if conn.explode == "rollback":
raise MockDisconnect("Lost the DB connection on rollback")
if conn.explode == "rollback_no_disconnect":
raise MockError(
"something broke on rollback but we didn't lose the "
"connection"
)
else:
return
conn = Mock(
rollback=Mock(side_effect=rollback), cursor=Mock(side_effect=cursor())
)
return conn
def MockDBAPI():
connections = []
stopped = [False]
def connect():
while True:
if stopped[0]:
raise MockDisconnect("database is stopped")
conn = mock_connection()
connections.append(conn)
yield conn
def shutdown(explode="execute", stop=False):
stopped[0] = stop
for c in connections:
c.explode = explode
def restart():
stopped[0] = False
connections[:] = []
def dispose():
stopped[0] = False
for c in connections:
c.explode = None
connections[:] = []
return Mock(
connect=Mock(side_effect=connect()),
shutdown=Mock(side_effect=shutdown),
dispose=Mock(side_effect=dispose),
restart=Mock(side_effect=restart),
paramstyle="named",
connections=connections,
Error=MockError,
)
class PrePingMockTest(fixtures.TestBase):
def setup(self):
self.dbapi = MockDBAPI()
def _pool_fixture(self, pre_ping):
dialect = url.make_url(
"postgresql://foo:bar@localhost/test"
).get_dialect()()
dialect.dbapi = self.dbapi
_pool = pool.QueuePool(
creator=lambda: self.dbapi.connect("foo.db"),
pre_ping=pre_ping,
dialect=dialect,
)
dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
return _pool
def teardown(self):
self.dbapi.dispose()
def test_connect_across_restart(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
stale_connection = conn.connection
conn.close()
self.dbapi.shutdown("execute")
self.dbapi.restart()
conn = pool.connect()
cursor = conn.cursor()
cursor.execute("hi")
stale_cursor = stale_connection.cursor()
assert_raises(MockDisconnect, stale_cursor.execute, "hi")
def test_raise_db_is_stopped(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
assert_raises_message_context_ok(
MockDisconnect, "database is stopped", pool.connect
)
def test_waits_til_exec_wo_ping_db_is_stopped(self):
pool = self._pool_fixture(pre_ping=False)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
conn = pool.connect()
cursor = conn.cursor()
assert_raises_message(
MockDisconnect,
"Lost the DB connection on execute",
cursor.execute,
"foo",
)
def test_waits_til_exec_wo_ping_db_is_restarted(self):
pool = self._pool_fixture(pre_ping=False)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
self.dbapi.restart()
conn = pool.connect()
cursor = conn.cursor()
assert_raises_message(
MockDisconnect,
"Lost the DB connection on execute",
cursor.execute,
"foo",
)
@testing.requires.predictable_gc
def test_pre_ping_weakref_finalizer(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.cursor(), call.rollback()])
self.dbapi.shutdown("execute", stop=True)
self.dbapi.restart()
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.cursor(), call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(
old_dbapi_conn.mock_calls,
[call.cursor(), call.rollback(), call.cursor(), call.close()],
)
class MockReconnectTest(fixtures.TestBase):
def setup(self):
self.dbapi = MockDBAPI()
self.db = testing_engine(
"postgresql://foo:bar@localhost/test",
options=dict(module=self.dbapi, _initialize=False),
)
self.mock_connect = call(
host="localhost", password="bar", user="foo", database="test"
)
# monkeypatch disconnect checker
self.db.dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
def teardown(self):
self.dbapi.dispose()
def test_reconnect(self):
"""test that an 'is_disconnect' condition will invalidate the
connection, and additionally dispose the previous connection
pool and recreate."""
# make a connection
conn = self.db.connect()
# connection works
conn.execute(select([1]))
# create a second connection within the pool, which we'll ensure
# also goes away
conn2 = self.db.connect()
conn2.close()
# two connections opened total now
assert len(self.dbapi.connections) == 2
# set it to fail
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
# assert was invalidated
assert not conn.closed
assert conn.invalidated
# close shouldn't break
conn.close()
# ensure one connection closed...
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
conn = self.db.connect()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
conn.execute(select([1]))
conn.close()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
def test_invalidate_trans(self):
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert_raises_message(
tsa.exc.StatementError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select([1]),
)
assert trans.is_active
assert_raises_message(
tsa.exc.InvalidRequestError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
assert trans.is_active
trans.rollback()
assert not trans.is_active
conn.execute(select([1]))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidate_dont_call_finalizer(self):
conn = self.db.connect()
finalizer = mock.Mock()
conn.connection._connection_record.finalize_callback.append(finalizer)
conn.invalidate()
assert conn.invalidated
eq_(finalizer.call_count, 0)
def test_conn_reusable(self):
conn = self.db.connect()
conn.execute(select([1]))
eq_(self.dbapi.connect.mock_calls, [self.mock_connect])
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
# test reconnects
conn.execute(select([1]))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidated_close(self):
conn = self.db.connect()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
conn.close()
assert conn.closed
assert conn.invalidated
assert_raises_message(
tsa.exc.StatementError,
"This Connection is closed",
conn.execute,
select([1]),
)
def test_noreconnect_execute_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("execute_no_disconnect")
# raises error
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on execute but we didn't lose the connection",
conn.execute,
select([1]),
)
assert conn.closed
assert not conn.invalidated
def test_noreconnect_rollback_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("rollback_no_disconnect")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on rollback but we didn't "
"lose the connection",
conn.execute,
select([1]),
)
assert conn.closed
assert not conn.invalidated
assert_raises_message(
tsa.exc.StatementError,
"This Connection is closed",
conn.execute,
select([1]),
)
def test_reconnect_on_reentrant(self):
conn = self.db.connect()
conn.execute(select([1]))
assert len(self.dbapi.connections) == 1
self.dbapi.shutdown("rollback")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.execute,
select([1]),
)
assert not conn.closed
assert conn.invalidated
def test_reconnect_on_reentrant_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("rollback")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.execute,
select([1]),
)
assert conn.closed
assert conn.invalidated
assert_raises_message(
tsa.exc.StatementError,
"This Connection is closed",
conn.execute,
select([1]),
)
def test_check_disconnect_no_cursor(self):
conn = self.db.connect()
result = conn.execute(select([1]))
result.cursor.close()
conn.close()
assert_raises_message(
tsa.exc.DBAPIError, "cursor closed", list, result
)
def test_dialect_initialize_once(self):
from sqlalchemy_1_3.engine.url import URL
from sqlalchemy_1_3.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
engine = create_engine(MyURL("foo://"), module=dbapi)
engine.connect()
# note that the dispose() call replaces the old pool with a new one;
# this is to test that even though a single pool is using
# dispatch.exec_once(), by replacing the pool with a new one, the event
# would normally fire again onless once=True is set on the original
# listen as well.
engine.dispose()
engine.connect()
eq_(Dialect.initialize.call_count, 1)
def test_dialect_initialize_retry_if_exception(self):
from sqlalchemy_1_3.engine.url import URL
from sqlalchemy_1_3.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
# note that the first_connect hook is only invoked when the pool
# makes a new DBAPI connection, and not when it checks out an existing
# connection. So there is a dependency here that if the initializer
# raises an exception, the pool-level connection attempt is also
# failed, meaning no DBAPI connection is pooled. If the first_connect
# exception raise did not prevent the connection from being pooled,
# there could be the case where the pool could return that connection
# on a subsequent attempt without initialization having proceeded.
Dialect.initialize.side_effect = TypeError
engine = create_engine(MyURL("foo://"), module=dbapi)
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 1)
is_true(engine.pool._pool.empty())
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 2)
is_true(engine.pool._pool.empty())
engine.dispose()
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 3)
is_true(engine.pool._pool.empty())
Dialect.initialize.side_effect = None
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
engine.dispose()
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
def test_invalidate_conn_w_contextmanager_interrupt(self):
# test [ticket:3803]
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt")
def go():
with conn.begin():
conn.execute(select([1]))
assert_raises(MockExitIsh, go)
assert conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select([1]))
assert not conn.invalidated
def test_invalidate_conn_interrupt_nodisconnect_workaround(self):
# test [ticket:3803] workaround for no disconnect on keyboard interrupt
@event.listens_for(self.db, "handle_error")
def cancel_disconnect(ctx):
ctx.is_disconnect = False
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt_dont_break")
def go():
with conn.begin():
conn.execute(select([1]))
assert_raises(MockExitIsh, go)
assert not conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select([1]))
assert not conn.invalidated
def test_invalidate_conn_w_contextmanager_disconnect(self):
# test [ticket:3803] change maintains old behavior
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("execute")
def go():
with conn.begin():
conn.execute(select([1]))
assert_raises(exc.DBAPIError, go) # wraps a MockDisconnect
assert conn.invalidated
ne_(pool._invalidate_time, 0) # pool is invalidated
conn.execute(select([1]))
assert not conn.invalidated
class CursorErrTest(fixtures.TestBase):
# this isn't really a "reconnect" test, it's more of
# a generic "recovery". maybe this test suite should have been
# named "test_error_recovery".
def _fixture(self, explode_on_exec, initialize):
class DBAPIError(Exception):
pass
def MockDBAPI():
def cursor():
while True:
if explode_on_exec:
yield Mock(
description=[],
close=Mock(side_effect=DBAPIError("explode")),
execute=Mock(side_effect=DBAPIError("explode")),
)
else:
yield Mock(
description=[],
close=Mock(side_effect=Exception("explode")),
)
def connect():
while True:
yield Mock(
spec=["cursor", "commit", "rollback", "close"],
cursor=Mock(side_effect=cursor()),
)
return Mock(
Error=DBAPIError,
paramstyle="qmark",
connect=Mock(side_effect=connect()),
)
dbapi = MockDBAPI()
from sqlalchemy_1_3.engine import default
url = Mock(
get_dialect=lambda: default.DefaultDialect,
_get_entrypoint=lambda: default.DefaultDialect,
_instantiate_plugins=lambda kwargs: (),
translate_connect_args=lambda: {},
query={},
)
eng = testing_engine(
url, options=dict(module=dbapi, _initialize=initialize)
)
eng.pool.logger = Mock()
return eng
def test_cursor_explode(self):
db = self._fixture(False, False)
conn = db.connect()
result = conn.execute("select foo")
result.close()
conn.close()
eq_(
db.pool.logger.error.mock_calls,
[call("Error closing cursor", exc_info=True)],
)
def test_cursor_shutdown_in_initialize(self):
db = self._fixture(True, True)
assert_raises_message_context_ok(
exc.SAWarning, "Exception attempting to detect", db.connect
)
eq_(
db.pool.logger.error.mock_calls,
[call("Error closing cursor", exc_info=True)],
)
def _assert_invalidated(fn, *args):
try:
fn(*args)
assert False
except tsa.exc.DBAPIError as e:
if not e.connection_invalidated:
raise
class RealReconnectTest(fixtures.TestBase):
__backend__ = True
__requires__ = "graceful_disconnects", "ad_hoc_engines"
def setup(self):
self.engine = engines.reconnecting_engine()
def teardown(self):
self.engine.dispose()
def test_reconnect(self):
conn = self.engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
# one more time
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
conn.close()
def test_multiple_invalidate(self):
c1 = self.engine.connect()
c2 = self.engine.connect()
eq_(c1.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1.execute, select([1]))
p2 = self.engine.pool
_assert_invalidated(c2.execute, select([1]))
# pool isn't replaced
assert self.engine.pool is p2
def test_branched_invalidate_branch_to_parent(self):
c1 = self.engine.connect()
with patch.object(self.engine.pool, "logger") as logger:
c1_branch = c1.connect()
eq_(c1_branch.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1_branch.execute, select([1]))
assert c1.invalidated
assert c1_branch.invalidated
c1_branch._revalidate_connection()
assert not c1.invalidated
assert not c1_branch.invalidated
assert "Invalidate connection" in logger.mock_calls[0][1][0]
def test_branched_invalidate_parent_to_branch(self):
c1 = self.engine.connect()
c1_branch = c1.connect()
eq_(c1_branch.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1.execute, select([1]))
assert c1.invalidated
assert c1_branch.invalidated
c1._revalidate_connection()
assert not c1.invalidated
assert not c1_branch.invalidated
def test_branch_invalidate_state(self):
c1 = self.engine.connect()
c1_branch = c1.connect()
eq_(c1_branch.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1_branch.execute, select([1]))
assert not c1_branch.closed
assert not c1_branch._connection_is_valid
def test_ensure_is_disconnect_gets_connection(self):
def is_disconnect(e, conn, cursor):
# connection is still present
assert conn.connection is not None
# the error usually occurs on connection.cursor(),
# though MySQLdb we get a non-working cursor.
# assert cursor is None
self.engine.dialect.is_disconnect = is_disconnect
conn = self.engine.connect()
self.engine.test_shutdown()
with expect_warnings(
"An exception has occurred during handling .*", py2konly=True
):
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
def test_rollback_on_invalid_plain(self):
conn = self.engine.connect()
trans = conn.begin()
conn.invalidate()
trans.rollback()
@testing.requires.two_phase_transactions
def test_rollback_on_invalid_twophase(self):
conn = self.engine.connect()
trans = conn.begin_twophase()
conn.invalidate()
trans.rollback()
@testing.requires.savepoints
def test_rollback_on_invalid_savepoint(self):
conn = self.engine.connect()
conn.begin()
trans2 = conn.begin_nested()
conn.invalidate()
trans2.rollback()
def test_invalidate_twice(self):
conn = self.engine.connect()
conn.invalidate()
conn.invalidate()
@testing.skip_if(
[lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle"
)
def test_explode_in_initializer(self):
engine = engines.testing_engine()
def broken_initialize(connection):
connection.execute("select fake_stuff from _fake_table")
engine.dialect.initialize = broken_initialize
# raises a DBAPIError, not an AttributeError
assert_raises(exc.DBAPIError, engine.connect)
@testing.skip_if(
[lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle"
)
def test_explode_in_initializer_disconnect(self):
engine = engines.testing_engine()
def broken_initialize(connection):
connection.execute("select fake_stuff from _fake_table")
engine.dialect.initialize = broken_initialize
def is_disconnect(e, conn, cursor):
return True
engine.dialect.is_disconnect = is_disconnect
# invalidate() also doesn't screw up
assert_raises(exc.DBAPIError, engine.connect)
def test_null_pool(self):
engine = engines.reconnecting_engine(
options=dict(poolclass=pool.NullPool)
)
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
def test_close(self):
conn = self.engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
conn.close()
conn = self.engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
def test_with_transaction(self):
conn = self.engine.connect()
trans = conn.begin()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert_raises_message(
tsa.exc.StatementError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select([1]),
)
assert trans.is_active
assert_raises_message(
tsa.exc.InvalidRequestError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
assert trans.is_active
trans.rollback()
assert not trans.is_active
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
class RecycleTest(fixtures.TestBase):
__backend__ = True
def test_basic(self):
engine = engines.reconnecting_engine()
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
# set the pool recycle down to 1.
# we aren't doing this inline with the
# engine create since cx_oracle takes way
# too long to create the 1st connection and don't
# want to build a huge delay into this test.
engine.pool._recycle = 1
# kill the DB connection
engine.test_shutdown()
# wait until past the recycle period
time.sleep(2)
# can connect, no exception
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
class PrePingRealTest(fixtures.TestBase):
__backend__ = True
def test_pre_ping_db_is_restarted(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
stale_connection = conn.connection.connection
conn.close()
engine.test_shutdown()
engine.test_restart()
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
def exercise_stale_connection():
curs = stale_connection.cursor()
curs.execute("select 1")
assert_raises(engine.dialect.dbapi.Error, exercise_stale_connection)
def test_pre_ping_db_stays_shutdown(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
engine.test_shutdown(stop=True)
assert_raises(exc.DBAPIError, engine.connect)
class InvalidateDuringResultTest(fixtures.TestBase):
__backend__ = True
def setup(self):
self.engine = engines.reconnecting_engine()
self.meta = MetaData(self.engine)
table = Table(
"sometable",
self.meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
self.meta.create_all()
table.insert().execute(
[{"id": i, "name": "row %d" % i} for i in range(1, 100)]
)
def teardown(self):
self.meta.drop_all()
self.engine.dispose()
@testing.crashes(
"oracle",
"cx_oracle 6 doesn't allow a close like this due to open cursors",
)
@testing.fails_if(
["+mysqlconnector", "+mysqldb", "+cymysql", "+pymysql", "+pg8000"],
"Buffers the result set and doesn't check for connection close",
)
def test_invalidate_on_results(self):
conn = self.engine.connect()
result = conn.execute("select * from sometable")
for x in range(20):
result.fetchone()
self.engine.test_shutdown()
_assert_invalidated(result.fetchone)
assert conn.invalidated
|
the-stack_0_11580 | # pylint: disable=C,R,W
from datetime import datetime, timedelta
import inspect
import logging
import os
import re
import time
import traceback
from urllib import parse
from flask import (
flash, g, Markup, redirect, render_template, request, Response, url_for,
)
from flask_appbuilder import expose, SimpleFormView
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_appbuilder.security.views import AuthDBView
from flask_login import login_user
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
import pandas as pd
import simplejson as json
import sqlalchemy as sqla
from sqlalchemy import and_, create_engine, MetaData, or_, update
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import IntegrityError
from unidecode import unidecode
from werkzeug.routing import BaseConverter
from werkzeug.utils import secure_filename
from superset import (
app, appbuilder, cache, dashboard_import_export_util, db, results_backend,
security_manager, sql_lab, utils, viz, csrf)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource, SqlaTable
from superset.exceptions import SupersetException
from superset.forms import CsvToDatabaseForm
from superset.jinja_context import get_template_processor
from superset.legacy import cast_form_data, update_time_range
import superset.models.core as models
from superset.models.sql_lab import Query
from superset.models.user_attributes import UserAttribute
from superset.sql_parse import SupersetQuery
from superset.utils import (
merge_extra_filters, merge_request_params, QueryStatus,
)
from .base import (
api, BaseSupersetView,
check_ownership,
CsvResponse, DeleteMixin,
generate_download_headers, get_error_msg,
json_error_response, SupersetFilter, SupersetModelView, YamlExportMixin,
)
from .utils import bootstrap_user_data
config = app.config
stats_logger = config.get('STATS_LOGGER')
log_this = models.Log.log_this
DAR = models.DatasourceAccessRequest
ALL_DATASOURCE_ACCESS_ERR = __(
'This endpoint requires the `all_datasource_access` permission')
DATASOURCE_MISSING_ERR = __('The datasource seems to have been deleted')
ACCESS_REQUEST_MISSING_ERR = __(
'The access requests seem to have been deleted')
USER_MISSING_ERR = __('The user seems to have been deleted')
FORM_DATA_KEY_BLACKLIST = []
if not config.get('ENABLE_JAVASCRIPT_CONTROLS'):
FORM_DATA_KEY_BLACKLIST = [
'js_tooltip',
'js_onclick_href',
'js_data_mutator',
]
def get_database_access_error_msg(database_name):
return __('This view requires the database %(name)s or '
'`all_datasource_access` permission', name=database_name)
def json_success(json_msg, status=200):
return Response(json_msg, status=status, mimetype='application/json')
def is_owner(obj, user):
""" Check if user is owner of the slice """
return obj and user in obj.owners
def check_dbp_user(user, is_shared):
if app.config['ENABLE_CUSTOM_ROLE_RESOURCE_SHOW'] and not is_shared and user:
for role in user.roles:
if role.name.lower().find(app.config['CUSTOM_ROLE_NAME_KEYWORD'].lower()) >= 0:
return True
return False
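# Hedged illustration (values below are assumptions, not taken from this file):
# the helper above expects custom keys in superset_config.py along the lines of
#   ENABLE_CUSTOM_ROLE_RESOURCE_SHOW = True
#   CUSTOM_ROLE_NAME_KEYWORD = "dbp"              # hypothetical keyword
#   ENABLE_CHART_SHARE_IN_CUSTOM_ROLE = False
#   ENABLE_DASHBOARD_SHARE_IN_CUSTOM_ROLE = False
# so that roles whose names contain the keyword only see their own charts/dashboards.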
class SliceFilter(SupersetFilter):
def apply(self, query, func): # noqa
if security_manager.all_datasource_access():
return query
perms = self.get_view_menus('datasource_access')
# TODO(bogdan): add `schema_access` support here
#if len(perms) > 0 :
if check_dbp_user(g.user, app.config['ENABLE_CHART_SHARE_IN_CUSTOM_ROLE']):
slice_ids = self.get_current_user_slice_ids()
return query.filter(self.model.perm.in_(perms)).filter(self.model.id.in_(slice_ids))
else:
return query.filter(self.model.perm.in_(perms))
#else:
# return query.filter(self.model.id.in_(slice_ids))
class DashboardFilter(SupersetFilter):
"""List dashboards for which users have access to at least one slice or are owners"""
def apply(self, query, func): # noqa
if security_manager.all_datasource_access():
return query
Slice = models.Slice # noqa
Dash = models.Dashboard # noqa
User = security_manager.user_model
# TODO(bogdan): add `schema_access` support here
datasource_perms = self.get_view_menus('datasource_access')
slice_ids_qry = None
if check_dbp_user(g.user, app.config['ENABLE_DASHBOARD_SHARE_IN_CUSTOM_ROLE']):
slice_ids = self.get_current_user_slice_ids()
slice_ids_qry = (
db.session
.query(Slice.id)
.filter(Slice.perm.in_(datasource_perms)).filter(Slice.id.in_(slice_ids))
)
else:
slice_ids_qry = (
db.session
.query(Slice.id)
.filter(Slice.perm.in_(datasource_perms))
)
owner_ids_qry = (
db.session
.query(Dash.id)
.join(Dash.owners)
.filter(User.id == User.get_user_id())
)
query = query.filter(
or_(Dash.id.in_(
db.session.query(Dash.id)
.distinct()
.join(Dash.slices)
.filter(Slice.id.in_(slice_ids_qry)),
), Dash.id.in_(owner_ids_qry)),
)
return query
class DatabaseView(SupersetModelView, DeleteMixin, YamlExportMixin): # noqa
datamodel = SQLAInterface(models.Database)
list_title = _('List Databases')
show_title = _('Show Database')
add_title = _('Add Database')
edit_title = _('Edit Database')
list_columns = [
'database_name', 'backend', 'allow_run_sync', 'allow_run_async',
'allow_dml', 'allow_csv_upload', 'creator', 'modified']
order_columns = [
'database_name', 'allow_run_sync', 'allow_run_async', 'allow_dml',
'modified', 'allow_csv_upload',
]
add_columns = [
'database_name', 'sqlalchemy_uri', 'cache_timeout', 'expose_in_sqllab',
'allow_run_sync', 'allow_run_async', 'allow_csv_upload',
'allow_ctas', 'allow_dml', 'force_ctas_schema', 'impersonate_user',
'allow_multi_schema_metadata_fetch', 'extra',
]
search_exclude_columns = (
'password', 'tables', 'created_by', 'changed_by', 'queries',
'saved_queries')
edit_columns = add_columns
show_columns = [
'tables',
'cache_timeout',
'extra',
'database_name',
'sqlalchemy_uri',
'perm',
'created_by',
'created_on',
'changed_by',
'changed_on',
]
add_template = 'superset/models/database/add.html'
edit_template = 'superset/models/database/edit.html'
base_order = ('changed_on', 'desc')
description_columns = {
'sqlalchemy_uri': utils.markdown(
'Refer to the '
'[SqlAlchemy docs]'
'(http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#'
'database-urls) '
'for more information on how to structure your URI.', True),
'expose_in_sqllab': _('Expose this DB in SQL Lab'),
'allow_run_sync': _(
'Allow users to run synchronous queries, this is the default '
'and should work well for queries that can be executed '
'within a web request scope (<~1 minute)'),
'allow_run_async': _(
'Allow users to run queries, against an async backend. '
'This assumes that you have a Celery worker setup as well '
'as a results backend.'),
'allow_ctas': _('Allow CREATE TABLE AS option in SQL Lab'),
'allow_dml': _(
'Allow users to run non-SELECT statements '
'(UPDATE, DELETE, CREATE, ...) '
'in SQL Lab'),
'force_ctas_schema': _(
'When allowing CREATE TABLE AS option in SQL Lab, '
'this option forces the table to be created in this schema'),
'extra': utils.markdown(
'JSON string containing extra configuration elements.<br/>'
'1. The ``engine_params`` object gets unpacked into the '
'[sqlalchemy.create_engine]'
'(http://docs.sqlalchemy.org/en/latest/core/engines.html#'
'sqlalchemy.create_engine) call, while the ``metadata_params`` '
'gets unpacked into the [sqlalchemy.MetaData]'
'(http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html'
'#sqlalchemy.schema.MetaData) call.<br/>'
'2. The ``metadata_cache_timeout`` is a cache timeout setting '
'in seconds for metadata fetch of this database. Specify it as '
'**"metadata_cache_timeout": {"schema_cache_timeout": 600}**. '
'If unset, cache will not be enabled for the functionality. '
'A timeout of 0 indicates that the cache never expires.<br/>'
'3. The ``schemas_allowed_for_csv_upload`` is a comma separated list '
'of schemas that CSVs are allowed to upload to. '
'Specify it as **"schemas_allowed": ["public", "csv_upload"]**. '
'If database flavor does not support schema or any schema is allowed '
'to be accessed, just leave the list empty', True),
'impersonate_user': _(
'If Presto, all the queries in SQL Lab are going to be executed as the '
'currently logged on user who must have permission to run them.<br/>'
'If Hive and hive.server2.enable.doAs is enabled, will run the queries as '
'service account, but impersonate the currently logged on user '
'via hive.server2.proxy.user property.'),
'allow_multi_schema_metadata_fetch': _(
'Allow SQL Lab to fetch a list of all tables and all views across '
'all database schemas. For large data warehouse with thousands of '
'tables, this can be expensive and put strain on the system.'),
'cache_timeout': _(
'Duration (in seconds) of the caching timeout for charts of this database. '
'A timeout of 0 indicates that the cache never expires. '
'Note this defaults to the global timeout if undefined.'),
'allow_csv_upload': _(
'If selected, please set the schemas allowed for csv upload in Extra.'),
}
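    # Hedged illustration (not part of the original file): a complete ``extra``
    # value combining the keys documented above might look like
    #   {
    #       "engine_params": {"pool_recycle": 3600},
    #       "metadata_params": {},
    #       "metadata_cache_timeout": {"schema_cache_timeout": 600},
    #       "schemas_allowed_for_csv_upload": ["public", "csv_upload"]
    #   }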
label_columns = {
'expose_in_sqllab': _('Expose in SQL Lab'),
'allow_ctas': _('Allow CREATE TABLE AS'),
'allow_dml': _('Allow DML'),
'force_ctas_schema': _('CTAS Schema'),
'database_name': _('Database'),
'creator': _('Creator'),
'changed_on_': _('Last Changed'),
'sqlalchemy_uri': _('SQLAlchemy URI'),
'cache_timeout': _('Chart Cache Timeout'),
'extra': _('Extra'),
'allow_run_sync': _('Allow Run Sync'),
'allow_run_async': _('Allow Run Async'),
'impersonate_user': _('Impersonate the logged on user'),
'allow_csv_upload': _('Allow Csv Upload'),
'modified': _('Modified'),
'allow_multi_schema_metadata_fetch': _('Allow Multi Schema Metadata Fetch'),
'backend': _('Backend'),
}
def pre_add(self, db):
self.check_extra(db)
db.set_sqlalchemy_uri(db.sqlalchemy_uri)
security_manager.merge_perm('database_access', db.perm)
# adding a new database we always want to force refresh schema list
for schema in db.all_schema_names(force_refresh=True):
security_manager.merge_perm(
'schema_access', security_manager.get_schema_perm(db, schema))
def pre_update(self, db):
self.pre_add(db)
def pre_delete(self, obj):
if obj.tables:
raise SupersetException(Markup(
'Cannot delete a database that has tables attached. '
"Here's the list of associated tables: " +
', '.join('{}'.format(o) for o in obj.tables)))
def _delete(self, pk):
DeleteMixin._delete(self, pk)
def check_extra(self, db):
# this will check whether json.loads(extra) can succeed
try:
extra = db.get_extra()
except Exception as e:
raise Exception('Extra field cannot be decoded by JSON. {}'.format(str(e)))
# this will check whether 'metadata_params' is configured correctly
metadata_signature = inspect.signature(MetaData)
for key in extra.get('metadata_params', {}):
if key not in metadata_signature.parameters:
raise Exception('The metadata_params in Extra field '
'is not configured correctly. The key '
'{} is invalid.'.format(key))
appbuilder.add_link(
'Import Dashboards',
label=__('Import Dashboards'),
href='/superset/import_dashboards',
icon='fa-cloud-upload',
category='Manage',
category_label=__('Manage'),
category_icon='fa-wrench')
appbuilder.add_view(
DatabaseView,
'Databases',
label=__('Databases'),
icon='fa-database',
category='Sources',
category_label=__('Sources'),
category_icon='fa-database')
class DatabaseAsync(DatabaseView):
list_columns = [
'id', 'database_name',
'expose_in_sqllab', 'allow_ctas', 'force_ctas_schema',
'allow_run_async', 'allow_run_sync', 'allow_dml',
'allow_multi_schema_metadata_fetch', 'allow_csv_upload',
'allows_subquery',
]
appbuilder.add_view_no_menu(DatabaseAsync)
class CsvToDatabaseView(SimpleFormView):
form = CsvToDatabaseForm
form_template = 'superset/form_view/csv_to_database_view/edit.html'
form_title = _('CSV to Database configuration')
add_columns = ['database', 'schema', 'table_name']
def form_get(self, form):
form.sep.data = ','
form.header.data = 0
form.mangle_dupe_cols.data = True
form.skipinitialspace.data = False
form.skip_blank_lines.data = True
form.infer_datetime_format.data = True
form.decimal.data = '.'
form.if_exists.data = 'fail'
def form_post(self, form):
database = form.con.data
schema_name = form.schema.data or ''
if not self.is_schema_allowed(database, schema_name):
message = _('Database "{0}" Schema "{1}" is not allowed for csv uploads. '
'Please contact Superset Admin'.format(database.database_name,
schema_name))
flash(message, 'danger')
return redirect('/csvtodatabaseview/form')
csv_file = form.csv_file.data
form.csv_file.data.filename = secure_filename(form.csv_file.data.filename)
csv_filename = form.csv_file.data.filename
path = os.path.join(config['UPLOAD_FOLDER'], csv_filename)
try:
utils.ensure_path_exists(config['UPLOAD_FOLDER'])
csv_file.save(path)
if csv_filename.lower().endswith("csv"):
table = SqlaTable(table_name=form.name.data)
table.database = form.data.get('con')
table.database_id = table.database.id
table.database.db_engine_spec.create_table_from_csv(form, table)
elif csv_filename.lower().endswith("xls") or csv_filename.lower().endswith("xlsx"):
table = SqlaTable(table_name=form.name.data)
table.database = form.data.get('con')
table.database_id = table.database.id
table.database.db_engine_spec.create_table_from_excel(form, path)
except Exception as e:
try:
os.remove(path)
except OSError:
pass
message = 'Table name {} already exists. Please pick another'.format(
form.name.data) if isinstance(e, IntegrityError) else e
flash(
message,
'danger')
return redirect('/csvtodatabaseview/form')
os.remove(path)
# Go back to welcome page / splash screen
db_name = table.database.database_name
message = _('CSV file "{0}" uploaded to table "{1}" in '
'database "{2}"'.format(csv_filename,
form.name.data,
db_name))
flash(message, 'info')
return redirect('/tablemodelview/list/')
def is_schema_allowed(self, database, schema):
if not database.allow_csv_upload:
return False
schemas = database.get_schema_access_for_csv_upload()
if schemas:
return schema in schemas
return (security_manager.database_access(database) or
security_manager.all_datasource_access())
appbuilder.add_view_no_menu(CsvToDatabaseView)
class DatabaseTablesAsync(DatabaseView):
list_columns = ['id', 'all_table_names', 'all_schema_names']
appbuilder.add_view_no_menu(DatabaseTablesAsync)
if config.get('ENABLE_ACCESS_REQUEST'):
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(DAR)
list_columns = [
'username', 'user_roles', 'datasource_link',
'roles_with_datasource', 'created_on']
order_columns = ['created_on']
base_order = ('changed_on', 'desc')
label_columns = {
'username': _('User'),
'user_roles': _('User Roles'),
'database': _('Database URL'),
'datasource_link': _('Datasource'),
'roles_with_datasource': _('Roles to grant'),
'created_on': _('Created On'),
}
appbuilder.add_view(
AccessRequestsModelView,
'Access requests',
label=__('Access requests'),
category='Security',
category_label=__('Security'),
icon='fa-table')
class SliceModelView(SupersetModelView, DeleteMixin): # noqa
route_base = '/chart'
datamodel = SQLAInterface(models.Slice)
list_title = _('List Charts')
show_title = _('Show Chart')
add_title = _('Add Chart')
edit_title = _('Edit Chart')
can_add = False
label_columns = {
'datasource_link': _('Datasource'),
}
search_columns = (
'slice_name', 'description', 'viz_type', 'datasource_name', 'owners',
)
list_columns = [
'slice_link', 'viz_type', 'datasource_link', 'creator', 'modified']
order_columns = ['viz_type', 'datasource_link', 'modified']
edit_columns = [
'slice_name', 'description', 'viz_type', 'owners', 'dashboards',
'params', 'cache_timeout']
base_order = ('changed_on', 'desc')
description_columns = {
        'description': Markup(
            'The content here can be displayed as widget headers in the '
            'dashboard view. Supports '
            '<a href="https://daringfireball.net/projects/markdown/">'
            'markdown</a>'),
'params': _(
'These parameters are generated dynamically when clicking '
'the save or overwrite button in the explore view. This JSON '
'object is exposed here for reference and for power users who may '
'want to alter specific parameters.',
),
'cache_timeout': _(
'Duration (in seconds) of the caching timeout for this chart. '
'Note this defaults to the datasource/table timeout if undefined.'),
}
base_filters = [['id', SliceFilter, lambda: []]]
label_columns = {
'cache_timeout': _('Cache Timeout'),
'creator': _('Creator'),
'dashboards': _('Dashboards'),
'datasource_link': _('Datasource'),
'description': _('Description'),
'modified': _('Last Modified'),
'owners': _('Owners'),
'params': _('Parameters'),
'slice_link': _('Chart'),
'slice_name': _('Name'),
'table': _('Table'),
'viz_type': _('Visualization Type'),
}
def pre_add(self, obj):
utils.validate_json(obj.params)
def pre_update(self, obj):
utils.validate_json(obj.params)
check_ownership(obj)
def pre_delete(self, obj):
check_ownership(obj)
@expose('/add', methods=['GET', 'POST'])
@has_access
def add(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
{'value': str(d.id) + '__' + d.type, 'label': repr(d)}
for d in datasources
]
return self.render_template(
'superset/add_slice.html',
bootstrap_data=json.dumps({
'datasources': sorted(datasources, key=lambda d: d['label']),
}),
)
appbuilder.add_view(
SliceModelView,
'Charts',
label=__('Charts'),
icon='fa-bar-chart',
category='',
category_icon='')
class SliceAsync(SliceModelView): # noqa
route_base = '/sliceasync'
list_columns = [
'id', 'slice_link', 'viz_type', 'slice_name',
'creator', 'modified', 'icons']
label_columns = {
'icons': ' ',
'slice_link': _('Chart'),
}
appbuilder.add_view_no_menu(SliceAsync)
class SliceAddView(SliceModelView): # noqa
route_base = '/sliceaddview'
list_columns = [
'id', 'slice_name', 'slice_url', 'edit_url', 'viz_type', 'params',
'description', 'description_markeddown', 'datasource_id', 'datasource_type',
'datasource_name_text', 'datasource_link',
'owners', 'modified', 'changed_on']
appbuilder.add_view_no_menu(SliceAddView)
class DashboardModelView(SupersetModelView, DeleteMixin): # noqa
route_base = '/dashboard'
datamodel = SQLAInterface(models.Dashboard)
list_title = _('List Dashboards')
show_title = _('Show Dashboard')
add_title = _('Add Dashboard')
edit_title = _('Edit Dashboard')
list_columns = ['dashboard_link', 'creator', 'modified']
order_columns = ['modified']
edit_columns = [
'dashboard_title', 'slug', 'owners', 'position_json', 'css',
'json_metadata']
show_columns = edit_columns + ['table_names', 'slices']
search_columns = ('dashboard_title', 'slug', 'owners')
add_columns = edit_columns
base_order = ('changed_on', 'desc')
description_columns = {
'position_json': _(
'This json object describes the positioning of the widgets in '
'the dashboard. It is dynamically generated when adjusting '
'the widgets size and positions by using drag & drop in '
'the dashboard view'),
'css': _(
'The css for individual dashboards can be altered here, or '
'in the dashboard view where changes are immediately '
'visible'),
'slug': _('To get a readable URL for your dashboard'),
'json_metadata': _(
'This JSON object is generated dynamically when clicking '
'the save or overwrite button in the dashboard view. It '
'is exposed here for reference and for power users who may '
'want to alter specific parameters.'),
'owners': _('Owners is a list of users who can alter the dashboard.'),
}
base_filters = [['slice', DashboardFilter, lambda: []]]
label_columns = {
'dashboard_link': _('Dashboard'),
'dashboard_title': _('Title'),
'slug': _('Slug'),
'slices': _('Charts'),
'owners': _('Owners'),
'creator': _('Creator'),
'modified': _('Modified'),
'position_json': _('Position JSON'),
'css': _('CSS'),
'json_metadata': _('JSON Metadata'),
'table_names': _('Underlying Tables'),
}
def pre_add(self, obj):
obj.slug = obj.slug.strip() or None
if obj.slug:
obj.slug = obj.slug.replace(' ', '-')
obj.slug = re.sub(r'[^\w\-]+', '', obj.slug)
if g.user not in obj.owners:
obj.owners.append(g.user)
utils.validate_json(obj.json_metadata)
utils.validate_json(obj.position_json)
owners = [o for o in obj.owners]
for slc in obj.slices:
slc.owners = list(set(owners) | set(slc.owners))
def pre_update(self, obj):
check_ownership(obj)
self.pre_add(obj)
def pre_delete(self, obj):
check_ownership(obj)
@action('mulexport', __('Export'), __('Export dashboards?'), 'fa-database')
def mulexport(self, items):
if not isinstance(items, list):
items = [items]
ids = ''.join('&id={}'.format(d.id) for d in items)
return redirect(
'/dashboard/export_dashboards_form?{}'.format(ids[1:]))
@expose('/export_dashboards_form')
def download_dashboards(self):
if request.args.get('action') == 'go':
ids = request.args.getlist('id')
return Response(
models.Dashboard.export_dashboards(ids),
headers=generate_download_headers('json'),
mimetype='application/text')
return self.render_template(
'superset/export_dashboards.html',
dashboards_url='/dashboard/list',
)
appbuilder.add_view(
DashboardModelView,
'Dashboards',
label=__('Dashboards'),
icon='fa-dashboard',
category='',
category_icon='')
class DashboardModelViewAsync(DashboardModelView): # noqa
route_base = '/dashboardasync'
list_columns = [
'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',
'changed_on', 'url', 'changed_by_name',
]
label_columns = {
'dashboard_link': _('Dashboard'),
'dashboard_title': _('Title'),
'creator': _('Creator'),
'modified': _('Modified'),
}
appbuilder.add_view_no_menu(DashboardModelViewAsync)
class DashboardAddView(DashboardModelView): # noqa
route_base = '/dashboardaddview'
list_columns = [
'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',
'changed_on', 'url', 'changed_by_name',
]
show_columns = list(set(DashboardModelView.edit_columns + list_columns))
appbuilder.add_view_no_menu(DashboardAddView)
class LogModelView(SupersetModelView):
datamodel = SQLAInterface(models.Log)
list_title = _('List Log')
show_title = _('Show Log')
add_title = _('Add Log')
edit_title = _('Edit Log')
list_columns = ('user', 'action', 'local_dttm')
edit_columns = ('user', 'action', 'dttm', 'json')
base_order = ('dttm', 'desc')
label_columns = {
'user': _('User'),
'action': _('Action'),
'local_dttm': _('Time'),
'json': _('JSON'),
}
appbuilder.add_view(
LogModelView,
'Action Log',
label=__('Action Log'),
category='Security',
category_label=__('Security'),
icon='fa-list-ol')
@app.route('/health')
def health():
return 'OK'
@app.route('/healthcheck')
def healthcheck():
return 'OK'
@app.route('/ping')
def ping():
return 'OK'
@csrf.exempt
@app.route('/add_user_from_dbp', methods=['POST'])
def add_user_from_dbp():
raw_user_info = request.data
user_info = json.loads(raw_user_info, encoding='utf-8')
try:
username = user_info.get('username', None)
first_name = user_info.get('first_name', None)
last_name = user_info.get('last_name', None)
email = user_info.get('email', None)
password = user_info.get('password', None)
user_role = user_info.get('role', config.get('CUSTOM_ROLE_NAME_KEYWORD'))
if not username and not email:
return json_error_response(
'username and email are missing.')
user = security_manager.find_user(username, email)
if user:
return json_error_response(
                'User with name (%s) or email (%s) already exists.' %
                (username, email))
role = security_manager.find_role(user_role)
if not role:
return json_error_response(
                'Role with name (%s) does not exist.' % (user_role,))
user = security_manager.add_user(username=username, first_name=first_name, last_name=last_name, email=email,
role=role, password=password)
resp = json_success(json.dumps(
{'user_id': user.id}, default=utils.json_int_dttm_ser,
ignore_nan=True), status=200)
return resp
except Exception:
        return json_error_response(
            'Error in call add_user_from_dbp. '
            'The error message returned was:\n{}'.format(
                traceback.format_exc()))
class KV(BaseSupersetView):
"""Used for storing and retrieving key value pairs"""
@log_this
@expose('/store/', methods=['POST'])
def store(self):
try:
value = request.form.get('data')
obj = models.KeyValue(value=value)
db.session.add(obj)
db.session.commit()
except Exception as e:
return json_error_response(e)
return Response(
json.dumps({'id': obj.id}),
status=200)
@log_this
@expose('/<key_id>/', methods=['GET'])
def get_value(self, key_id):
kv = None
try:
kv = db.session.query(models.KeyValue).filter_by(id=key_id).one()
except Exception as e:
return json_error_response(e)
return Response(kv.value, status=200)
appbuilder.add_view_no_menu(KV)
class R(BaseSupersetView):
"""used for short urls"""
@log_this
@expose('/<url_id>')
def index(self, url_id):
url = db.session.query(models.Url).filter_by(id=url_id).first()
if url:
return redirect('/' + url.url)
else:
flash('URL to nowhere...', 'danger')
return redirect('/')
@log_this
@expose('/shortner/', methods=['POST', 'GET'])
def shortner(self):
url = request.form.get('data')
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
return Response(
'{scheme}://{request.headers[Host]}/r/{obj.id}'.format(
scheme=request.scheme, request=request, obj=obj),
mimetype='text/plain')
@expose('/msg/')
def msg(self):
"""Redirects to specified url while flash a message"""
flash(Markup(request.args.get('msg')), 'info')
return redirect(request.args.get('url'))
appbuilder.add_view_no_menu(R)
class Superset(BaseSupersetView):
"""The base views for Superset!"""
@has_access_api
@expose('/datasources/')
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources]
datasources = sorted(datasources, key=lambda o: o['name'])
return self.json_response(datasources)
@has_access_api
@expose('/override_role_permissions/', methods=['POST'])
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
                'datasources': ['{datasource name}', '{datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data['role_name']
databases = data['database']
db_ds_names = set()
for dbs in databases:
for schema in dbs['schema']:
for ds_name in schema['datasources']:
fullname = utils.get_datasource_full_name(
dbs['name'], ds_name, schema=schema['name'])
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm,
permission_name='datasource_access')
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response({
'granted': granted_perms,
'requested': list(db_ds_names),
}, status=201)
@log_this
@has_access
@expose('/request_access/')
def request_access(self):
datasources = set()
dashboard_id = request.args.get('dashboard_id')
if dashboard_id:
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(dashboard_id))
.one()
)
datasources |= dash.datasources
datasource_id = request.args.get('datasource_id')
datasource_type = request.args.get('datasource_type')
if datasource_id:
ds_class = ConnectorRegistry.sources.get(datasource_type)
datasource = (
db.session.query(ds_class)
.filter_by(id=int(datasource_id))
.one()
)
datasources.add(datasource)
has_access = all(
(
datasource and security_manager.datasource_access(datasource)
for datasource in datasources
))
if has_access:
return redirect('/superset/dashboard/{}'.format(dashboard_id))
if request.args.get('action') == 'go':
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id,
datasource_type=datasource.type)
db.session.add(access_request)
db.session.commit()
flash(__('Access was requested'), 'info')
return redirect('/')
return self.render_template(
'superset/request_access.html',
datasources=datasources,
datasource_names=', '.join([o.name for o in datasources]),
)
@log_this
@has_access
@expose('/approve')
def approve(self):
def clean_fulfilled_requests(session):
for r in session.query(DAR).all():
datasource = ConnectorRegistry.get_datasource(
r.datasource_type, r.datasource_id, session)
user = security_manager.get_user_by_id(r.created_by_fk)
                if not datasource or \
                        security_manager.datasource_access(datasource, user):
                    # the datasource no longer exists, or the user already
                    # has access to it; either way the request is obsolete
                    session.delete(r)
session.commit()
datasource_type = request.args.get('datasource_type')
datasource_id = request.args.get('datasource_id')
created_by_username = request.args.get('created_by')
role_to_grant = request.args.get('role_to_grant')
role_to_extend = request.args.get('role_to_extend')
session = db.session
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, session)
if not datasource:
flash(DATASOURCE_MISSING_ERR, 'alert')
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, 'alert')
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter(
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id)
.all()
)
if not requests:
flash(ACCESS_REQUEST_MISSING_ERR, 'alert')
return json_error_response(ACCESS_REQUEST_MISSING_ERR)
# check if you can approve
if security_manager.all_datasource_access() or g.user.id == datasource.owner_id:
            # can be done by admin only
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
'%(user)s was granted the role %(role)s that gives access '
'to the %(datasource)s',
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_granted.txt', app.config)
flash(msg, 'info')
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
'email/datasource_access', datasource.perm)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __('Role %(r)s was extended to provide the access to '
'the datasource %(ds)s', r=role_to_extend,
ds=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_extended.txt', app.config)
flash(msg, 'info')
clean_fulfilled_requests(session)
else:
flash(__('You have no permission to approve this request'),
'danger')
return redirect('/accessrequestsmodelview/list/')
for r in requests:
session.delete(r)
session.commit()
return redirect('/accessrequestsmodelview/list/')
def get_form_data(self, slice_id=None, use_slice_data=False):
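        # form_data is merged from several sources; later sources take
        # precedence: POST body, then request args, and the result is layered
        # on top of any saved short-URL form_data and, optionally, the
        # slice's own form_data.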
form_data = {}
post_data = request.form.get('form_data')
request_args_data = request.args.get('form_data')
# Supporting POST
if post_data:
form_data.update(json.loads(post_data))
# request params can overwrite post body
if request_args_data:
form_data.update(json.loads(request_args_data))
url_id = request.args.get('r')
if url_id:
saved_url = db.session.query(models.Url).filter_by(id=url_id).first()
if saved_url:
url_str = parse.unquote_plus(
saved_url.url.split('?')[1][10:], encoding='utf-8', errors=None)
url_form_data = json.loads(url_str)
                # allow form_data in the request to override the saved URL's form_data
url_form_data.update(form_data)
form_data = url_form_data
if request.args.get('viz_type'):
# Converting old URLs
form_data = cast_form_data(form_data)
form_data = {
k: v
for k, v in form_data.items()
if k not in FORM_DATA_KEY_BLACKLIST
}
# When a slice_id is present, load from DB and override
# the form_data from the DB with the other form_data provided
slice_id = form_data.get('slice_id') or slice_id
slc = None
# Check if form data only contains slice_id
contains_only_slc_id = not any(key != 'slice_id' for key in form_data)
# Include the slice_form_data if request from explore or slice calls
# or if form_data only contains slice_id
if slice_id and (use_slice_data or contains_only_slc_id):
slc = db.session.query(models.Slice).filter_by(id=slice_id).first()
slice_form_data = slc.form_data.copy()
            # allow form_data in the request to override the slice's form_data
slice_form_data.update(form_data)
form_data = slice_form_data
update_time_range(form_data)
return form_data, slc
def get_viz(
self,
slice_id=None,
form_data=None,
datasource_type=None,
datasource_id=None,
force=False,
):
if slice_id:
slc = (
db.session.query(models.Slice)
.filter_by(id=slice_id)
.one()
)
return slc.get_viz()
else:
viz_type = form_data.get('viz_type', 'table')
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
viz_obj = viz.viz_types[viz_type](
datasource,
form_data=form_data,
force=force,
)
return viz_obj
@has_access
@expose('/slice/<slice_id>/')
def slice(self, slice_id):
form_data, slc = self.get_form_data(slice_id, use_slice_data=True)
endpoint = '/superset/explore/?form_data={}'.format(
parse.quote(json.dumps(form_data)),
)
if request.args.get('standalone') == 'true':
endpoint += '&standalone=true'
return redirect(endpoint)
def get_query_string_response(self, viz_obj):
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as e:
logging.exception(e)
return json_error_response(e)
if query_obj and query_obj['prequeries']:
query_obj['prequeries'].append(query)
query = ';\n\n'.join(query_obj['prequeries'])
if query:
query += ';'
else:
query = 'No query.'
return self.json_response({
'query': query,
'language': viz_obj.datasource.query_language,
})
def get_raw_results(self, viz_obj):
return self.json_response({
'data': viz_obj.get_df().to_dict('records'),
})
def get_samples(self, viz_obj):
return self.json_response({
'data': viz_obj.get_samples(),
})
def generate_json(
self, datasource_type, datasource_id, form_data,
csv=False, query=False, force=False, results=False,
samples=False,
):
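        # Build the viz object first; the boolean flags below select the
        # response shape (csv download, query string, raw results, samples,
        # or the full chart payload).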
try:
viz_obj = self.get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
except Exception as e:
logging.exception(e)
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
if not security_manager.datasource_access(viz_obj.datasource, g.user):
return json_error_response(
security_manager.get_datasource_access_error_msg(viz_obj.datasource),
status=404,
link=security_manager.get_datasource_access_link(viz_obj.datasource))
if csv:
return CsvResponse(
viz_obj.get_csv(),
status=200,
headers=generate_download_headers('csv'),
mimetype='application/csv')
if query:
return self.get_query_string_response(viz_obj)
if results:
return self.get_raw_results(viz_obj)
if samples:
return self.get_samples(viz_obj)
try:
payload = viz_obj.get_payload()
except SupersetException as se:
logging.exception(se)
return json_error_response(utils.error_msg_from_exception(se),
status=se.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e))
status = 200
if (
payload.get('status') == QueryStatus.FAILED or
payload.get('error') is not None
):
status = 400
return json_success(viz_obj.json_dumps(payload), status=status)
@log_this
@has_access_api
@expose('/slice_json/<slice_id>')
def slice_json(self, slice_id):
try:
form_data, slc = self.get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
except Exception as e:
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return self.generate_json(datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data)
@log_this
@has_access_api
@expose('/annotation_json/<layer_id>')
def annotation_json(self, layer_id):
form_data = self.get_form_data()[0]
form_data['layer_id'] = layer_id
form_data['filters'] = [{'col': 'layer_id',
'op': '==',
'val': layer_id}]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types['table'](
datasource,
form_data=form_data,
force=False,
)
try:
payload = viz_obj.get_payload()
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e))
status = 200
if payload.get('status') == QueryStatus.FAILED:
status = 400
return json_success(viz_obj.json_dumps(payload), status=status)
@log_this
@has_access_api
@expose('/explore_json/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])
@expose('/explore_json/', methods=['GET', 'POST'])
def explore_json(self, datasource_type=None, datasource_id=None):
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
csv = request.args.get('csv') == 'true'
query = request.args.get('query') == 'true'
results = request.args.get('results') == 'true'
samples = request.args.get('samples') == 'true'
force = request.args.get('force') == 'true'
try:
form_data = self.get_form_data()[0]
datasource_id, datasource_type = self.datasource_info(
datasource_id, datasource_type, form_data)
except Exception as e:
logging.exception(e)
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return self.generate_json(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
csv=csv,
query=query,
results=results,
force=force,
samples=samples,
)
@log_this
@has_access
@expose('/import_dashboards', methods=['GET', 'POST'])
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get('file')
if request.method == 'POST' and f:
dashboard_import_export_util.import_dashboards(db.session, f.stream)
return redirect('/dashboard/list/')
return self.render_template('superset/import_dashboards.html')
@log_this
@has_access
@expose('/explorev2/<datasource_type>/<datasource_id>/')
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
**request.args))
@staticmethod
def datasource_info(datasource_id, datasource_type, form_data):
"""Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directory, now they should come as part of the form_data,
This function allows supporting both without duplicating code"""
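        # The datasource key is encoded as '<id>__<type>',
        # e.g. '3__table' (illustrative value).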
datasource = form_data.get('datasource', '')
if '__' in datasource:
datasource_id, datasource_type = datasource.split('__')
# The case where the datasource has been deleted
datasource_id = None if datasource_id == 'None' else datasource_id
if not datasource_id:
raise Exception(
'The datasource associated with this chart no longer exists')
datasource_id = int(datasource_id)
return datasource_id, datasource_type
@log_this
@has_access
@expose('/explore/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])
@expose('/explore/', methods=['GET', 'POST'])
def explore(self, datasource_type=None, datasource_id=None):
user_id = g.user.get_id() if g.user else None
form_data, slc = self.get_form_data(use_slice_data=True)
datasource_id, datasource_type = self.datasource_info(
datasource_id, datasource_type, form_data)
error_redirect = '/chart/list/'
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
flash(DATASOURCE_MISSING_ERR, 'danger')
return redirect(error_redirect)
if config.get('ENABLE_ACCESS_REQUEST') and (
not security_manager.datasource_access(datasource)
):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
'datasource_type={datasource_type}&'
'datasource_id={datasource_id}&'
''.format(**locals()))
viz_type = form_data.get('viz_type')
if not viz_type and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
# slc perms
slice_add_perm = security_manager.can_access('can_add', 'SliceModelView')
slice_overwrite_perm = is_owner(slc, g.user)
slice_download_perm = security_manager.can_access(
'can_download', 'SliceModelView')
form_data['datasource'] = str(datasource_id) + '__' + datasource_type
# On explore, merge legacy and extra filters into the form data
utils.convert_legacy_filters_into_adhoc(form_data)
merge_extra_filters(form_data)
# merge request url params
if request.method == 'GET':
merge_request_params(form_data, request.args)
# handle save or overwrite
action = request.args.get('action')
if action == 'overwrite' and not slice_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') + _('chart'),
status=400)
if action == 'saveas' and not slice_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('chart'),
status=400)
if action in ('saveas', 'overwrite'):
return self.save_or_overwrite_slice(
request.args,
slc, slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource.name)
standalone = request.args.get('standalone') == 'true'
bootstrap_data = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': slice_overwrite_perm,
'datasource': datasource.data,
'form_data': form_data,
'datasource_id': datasource_id,
'datasource_type': datasource_type,
'slice': slc.data if slc else None,
'standalone': standalone,
'user_id': user_id,
'user_name': g.user.username,
'forced_height': request.args.get('height'),
'common': self.common_bootsrap_payload(),
}
table_name = datasource.table_name \
if datasource_type == 'table' \
else datasource.datasource_name
if slc:
title = slc.slice_name
else:
title = _('Explore - %(table)s', table=table_name)
return self.render_template(
'superset/basic.html',
bootstrap_data=json.dumps(bootstrap_data),
entry='explore',
title=title,
standalone_mode=standalone)
@api
@has_access_api
@expose('/filter/<datasource_type>/<datasource_id>/<column>/')
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
if not security_manager.datasource_access(datasource):
return json_error_response(
security_manager.get_datasource_access_error_msg(datasource))
payload = json.dumps(
datasource.values_for_column(
column,
config.get('FILTER_SELECT_ROW_LIMIT', 10000),
),
default=utils.json_int_dttm_ser)
return json_success(payload)
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data, _ = self.get_form_data()
        if action == 'saveas':
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
        if action == 'saveas' and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
'Slice [{}] was added to dashboard [{}]'.format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
'Dashboard [{}] just got created and slice [{}] was added '
'to it'.format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = _('Chart [{}] has been saved').format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, 'info')
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = _('Chart [{}] has been overwritten').format(slc.slice_name)
flash(msg, 'info')
@api
@has_access_api
@expose('/checkbox/<model_view>/<id_>/<attr>/<value>', methods=['GET'])
def checkbox(self, model_view, id_, attr, value):
"""endpoint for checking/unchecking any boolean in a sqla model"""
modelview_to_model = {
'{}ColumnInlineView'.format(name.capitalize()): source.column_class
for name, source in ConnectorRegistry.sources.items()
}
model = modelview_to_model[model_view]
col = db.session.query(model).filter_by(id=id_).first()
checked = value == 'true'
if col:
setattr(col, attr, checked)
if checked:
metrics = col.get_metrics().values()
col.datasource.add_missing_metrics(metrics)
db.session.commit()
return json_success('OK')
@api
@has_access_api
@expose('/schemas/<db_id>/')
@expose('/schemas/<db_id>/<force_refresh>/')
def schemas(self, db_id, force_refresh='true'):
db_id = int(db_id)
force_refresh = force_refresh.lower() == 'true'
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
schemas = database.all_schema_names(force_refresh=force_refresh)
schemas = security_manager.schemas_accessible_by_user(database, schemas)
return Response(
json.dumps({'schemas': schemas}),
mimetype='application/json')
@api
@has_access_api
@expose('/tables/<db_id>/<schema>/<substr>/')
def tables(self, db_id, schema, substr):
"""Endpoint to fetch the list of tables for given database"""
db_id = int(db_id)
schema = utils.js_string_to_python(schema)
substr = utils.js_string_to_python(substr)
database = db.session.query(models.Database).filter_by(id=db_id).one()
table_names = security_manager.accessible_by_user(
database, database.all_table_names(schema), schema)
view_names = security_manager.accessible_by_user(
database, database.all_view_names(schema), schema)
if substr:
table_names = [tn for tn in table_names if substr in tn]
view_names = [vn for vn in view_names if substr in vn]
max_items = config.get('MAX_TABLE_NAMES') or len(table_names)
total_items = len(table_names) + len(view_names)
max_tables = len(table_names)
max_views = len(view_names)
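        # When filtering by substring, cap the combined list at MAX_TABLE_NAMES
        # and split the budget proportionally between tables and views.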
if total_items and substr:
max_tables = max_items * len(table_names) // total_items
max_views = max_items * len(view_names) // total_items
table_options = [{'value': tn, 'label': tn}
for tn in table_names[:max_tables]]
table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}
for vn in view_names[:max_views]])
payload = {
'tableLength': len(table_names) + len(view_names),
'options': table_options,
}
return json_success(json.dumps(payload))
@api
@has_access_api
@expose('/copy_dash/<dashboard_id>/', methods=['GET', 'POST'])
def copy_dash(self, dashboard_id):
"""Copy dashboard"""
session = db.session()
data = json.loads(request.form.get('data'))
dash = models.Dashboard()
original_dash = (
session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data['dashboard_title']
if data['duplicate_slices']:
# Duplicating slices as well, mapping old ids to new ones
old_to_new_sliceids = {}
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_sliceids['{}'.format(slc.id)] = \
'{}'.format(new_slice.id)
# update chartId of layout entities
# in v2_dash positions json data, chartId should be integer,
# while in older version slice_id is string type
for value in data['positions'].values():
if (
isinstance(value, dict) and value.get('meta') and
value.get('meta').get('chartId')
):
old_id = '{}'.format(value.get('meta').get('chartId'))
new_id = int(old_to_new_sliceids[old_id])
value['meta']['chartId'] = new_id
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
self._set_dash_metadata(dash, data)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@expose('/save_dash/<dashboard_id>/', methods=['GET', 'POST'])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return 'SUCCESS'
@staticmethod
def _set_dash_metadata(dashboard, data):
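        # Rebuild the dashboard's slice list, position_json and json_metadata
        # from the posted layout data.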
positions = data['positions']
# find slices in the position data
slice_ids = []
slice_id_to_name = {}
for value in positions.values():
if (
isinstance(value, dict) and value.get('meta') and
value.get('meta').get('chartId')
):
slice_id = value.get('meta').get('chartId')
slice_ids.append(slice_id)
slice_id_to_name[slice_id] = value.get('meta').get('sliceName')
session = db.session()
Slice = models.Slice # noqa
current_slices = session.query(Slice).filter(
Slice.id.in_(slice_ids)).all()
dashboard.slices = current_slices
# update slice names. this assumes user has permissions to update the slice
for slc in dashboard.slices:
new_name = slice_id_to_name[slc.id]
if slc.slice_name != new_name:
slc.slice_name = new_name
session.merge(slc)
session.flush()
# remove leading and trailing white spaces in the dumped json
dashboard.position_json = json.dumps(
positions, indent=None, separators=(',', ':'), sort_keys=True)
md = dashboard.params_dict
dashboard.css = data.get('css')
dashboard.dashboard_title = data['dashboard_title']
if 'filter_immune_slices' not in md:
md['filter_immune_slices'] = []
if 'timed_refresh_immune_slices' not in md:
md['timed_refresh_immune_slices'] = []
if 'filter_immune_slice_fields' not in md:
md['filter_immune_slice_fields'] = {}
md['expanded_slices'] = data['expanded_slices']
default_filters_data = json.loads(data.get('default_filters', '{}'))
applicable_filters = \
{key: v for key, v in default_filters_data.items()
if int(key) in slice_ids}
md['default_filters'] = json.dumps(applicable_filters)
dashboard.json_metadata = json.dumps(md)
@api
@has_access_api
@expose('/add_slices/<dashboard_id>/', methods=['POST'])
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get('data'))
session = db.session()
Slice = models.Slice # noqa
dash = (
session.query(models.Dashboard).filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(
Slice.id.in_(data['slice_ids']))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return 'SLICES ADDED'
@api
@has_access_api
@expose('/testconn', methods=['POST', 'GET'])
def testconn(self):
"""Tests a sqla connection"""
try:
username = g.user.username if g.user is not None else None
uri = request.json.get('uri')
db_name = request.json.get('name')
impersonate_user = request.json.get('impersonate_user')
database = None
if db_name:
database = (
db.session
.query(models.Database)
.filter_by(database_name=db_name)
.first()
)
if database and uri == database.safe_sqlalchemy_uri():
# the password-masked uri was passed
# use the URI associated with this database
uri = database.sqlalchemy_uri_decrypted
configuration = {}
if database and uri:
url = make_url(uri)
db_engine = models.Database.get_db_engine_spec_for_backend(
url.get_backend_name())
db_engine.patch()
masked_url = database.get_password_masked_url_from_uri(uri)
logging.info('Superset.testconn(). Masked URL: {0}'.format(masked_url))
configuration.update(
db_engine.get_configuration_for_impersonation(uri,
impersonate_user,
username),
)
engine_params = (
request.json
.get('extras', {})
.get('engine_params', {}))
            connect_args = engine_params.setdefault('connect_args', {})
            if configuration:
                connect_args['configuration'] = configuration
engine = create_engine(uri, **engine_params)
engine.connect()
return json_success(json.dumps(engine.table_names(), indent=4))
except Exception as e:
logging.exception(e)
return json_error_response((
'Connection failed!\n\n'
'The error message returned was:\n{}').format(e))
@api
@has_access_api
@expose('/recent_activity/<user_id>/', methods=['GET'])
def recent_activity(self, user_id):
"""Recent activity (actions) for a given user"""
M = models # noqa
if request.args.get('limit'):
limit = int(request.args.get('limit'))
else:
limit = 1000
qry = (
db.session.query(M.Log, M.Dashboard, M.Slice)
.outerjoin(
M.Dashboard,
M.Dashboard.id == M.Log.dashboard_id,
)
.outerjoin(
M.Slice,
M.Slice.id == M.Log.slice_id,
)
.filter(
sqla.and_(
~M.Log.action.in_(('queries', 'shortner', 'sql_json')),
M.Log.user_id == user_id,
),
)
.order_by(M.Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
if log.Dashboard:
item_url = log.Dashboard.url
item_title = log.Dashboard.dashboard_title
elif log.Slice:
item_url = log.Slice.slice_url
item_title = log.Slice.slice_name
payload.append({
'action': log.Log.action,
'item_url': item_url,
'item_title': item_title,
'time': log.Log.dttm,
})
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/csrf_token/', methods=['GET'])
def csrf_token(self):
return Response(
self.render_template('superset/csrf_token.json'),
mimetype='text/json',
)
@api
@has_access_api
@expose('/fave_dashboards_by_username/<username>/', methods=['GET'])
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@expose('/fave_dashboards/<user_id>/', methods=['GET'])
def fave_dashboards(self, user_id):
qry = (
db.session.query(
models.Dashboard,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'Dashboard',
models.Dashboard.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Dashboard.id,
'dashboard': o.Dashboard.dashboard_link(),
'title': o.Dashboard.dashboard_title,
'url': o.Dashboard.url,
'dttm': o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/created_dashboards/<user_id>/', methods=['GET'])
def created_dashboards(self, user_id):
Dash = models.Dashboard # noqa
qry = (
db.session.query(
Dash,
)
.filter(
sqla.or_(
Dash.created_by_fk == user_id,
Dash.changed_by_fk == user_id,
),
)
.order_by(
Dash.changed_on.desc(),
)
)
payload = [{
'id': o.id,
'dashboard': o.dashboard_link(),
'title': o.dashboard_title,
'url': o.url,
'dttm': o.changed_on,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/user_slices', methods=['GET'])
@expose('/user_slices/<user_id>/', methods=['GET'])
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
FavStar = models.FavStar # noqa
qry = (
db.session.query(Slice,
FavStar.dttm).join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
isouter=True).filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
),
)
.order_by(Slice.slice_name.asc())
)
payload = [{
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'data': o.Slice.form_data,
'dttm': o.dttm if o.dttm else o.Slice.changed_on,
'viz_type': o.Slice.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/created_slices', methods=['GET'])
@expose('/created_slices/<user_id>/', methods=['GET'])
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
qry = (
db.session.query(Slice)
.filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
),
)
.order_by(Slice.changed_on.desc())
)
payload = [{
'id': o.id,
'title': o.slice_name,
'url': o.slice_url,
'dttm': o.changed_on,
'viz_type': o.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/fave_slices', methods=['GET'])
@expose('/fave_slices/<user_id>/', methods=['GET'])
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(
models.Slice,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'dttm': o.dttm,
'viz_type': o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/warm_up_cache/', methods=['GET'])
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get('slice_id')
table_name = request.args.get('table_name')
db_name = request.args.get('db_name')
if not slice_id and not (table_name and db_name):
return json_error_response(__(
'Malformed request. slice_id or table_name and db_name '
'arguments are expected'), status=400)
if slice_id:
slices = session.query(models.Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(__(
'Chart %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources['table']
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
                    models.Database.database_name == db_name,
                    SqlaTable.table_name == table_name)
).first()
if not table:
return json_error_response(__(
"Table %(t)s wasn't found in the database %(d)s",
                    t=table_name, d=db_name), status=404)
slices = session.query(models.Slice).filter_by(
datasource_id=table.id,
datasource_type=table.type).all()
for slc in slices:
try:
obj = slc.get_viz(force=True)
obj.get_json()
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
[{'slice_id': slc.id, 'slice_name': slc.slice_name}
for slc in slices]))
@expose('/favstar/<class_name>/<obj_id>/<action>/')
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar # noqa
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id,
user_id=g.user.get_id()).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
),
)
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({'count': count}))
@has_access
@expose('/dashboard/<dashboard_id>/')
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one()
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config.get('ENABLE_ACCESS_REQUEST'):
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
'dashboard_id={dash.id}&'.format(**locals()))
dash_edit_perm = True
if check_dbp_user(g.user, app.config['ENABLE_DASHBOARD_SHARE_IN_CUSTOM_ROLE']):
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset') and g.user.id == dash.created_by_fk
else:
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset')
dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
superset_can_explore = security_manager.can_access('can_explore', 'Superset')
slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')
standalone_mode = request.args.get('standalone') == 'true'
edit_mode = request.args.get('edit') == 'true'
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(
dashboard_id=dash.id,
dashboard_version='v2',
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode)
dashboard_data = dash.data
dashboard_data.update({
'standalone_mode': standalone_mode,
'dash_save_perm': dash_save_perm,
'dash_edit_perm': dash_edit_perm,
'superset_can_explore': superset_can_explore,
'slice_can_edit': slice_can_edit,
})
bootstrap_data = {
'user_id': g.user.get_id(),
'user_name': g.user.username,
'dashboard_data': dashboard_data,
'datasources': {ds.uid: ds.data for ds in datasources},
'common': self.common_bootsrap_payload(),
'editMode': edit_mode,
}
if request.args.get('json') == 'true':
return json_success(json.dumps(bootstrap_data))
return self.render_template(
'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(bootstrap_data),
)
@api
@log_this
@expose('/log/', methods=['POST'])
def log(self):
return Response(status=200)
@has_access
@expose('/sync_druid/', methods=['POST'])
@log_this
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
                etc. The `count` type is stored internally as longSum.
            Other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload['config']
user_name = payload['user']
cluster_name = payload['cluster']
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources['druid']
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
cluster_name=cluster_name).first()
if not cluster:
err_msg = __("Can't find DruidCluster with cluster_name = "
"'%(name)s'", name=cluster_name)
logging.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(
druid_config, user, cluster)
except Exception as e:
logging.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
@has_access
@expose('/sqllab_viz/', methods=['POST'])
@log_this
def sqllab_viz(self):
SqlaTable = ConnectorRegistry.sources['table']
data = json.loads(request.form.get('data'))
table_name = data.get('datasourceName')
table = (
db.session.query(SqlaTable)
.filter_by(table_name=table_name)
.first()
)
if not table:
table = SqlaTable(table_name=table_name)
table.database_id = data.get('dbId')
table.schema = data.get('schema')
table.template_params = data.get('templateParams')
table.is_sqllab_view = True
q = SupersetQuery(data.get('sql'))
table.sql = q.stripped()
db.session.add(table)
        TableColumn = SqlaTable.column_class
        SqlMetric = SqlaTable.metric_class
        cols = []
        for config in data.get('columns'):
            column_name = config.get('name')
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config.get('is_date', False),
type=config.get('type', False),
)
cols.append(col)
table.columns = cols
table.metrics = [
SqlMetric(metric_name='count', expression='count(*)'),
]
db.session.commit()
return self.json_response(json.dumps({
'table_id': table.id,
}))
@has_access
@expose('/table/<database_id>/<table_name>/<schema>/')
@log_this
def table(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload_columns = []
indexes = []
primary_key = []
foreign_keys = []
try:
columns = mydb.get_columns(table_name, schema)
indexes = mydb.get_indexes(table_name, schema)
primary_key = mydb.get_pk_constraint(table_name, schema)
foreign_keys = mydb.get_foreign_keys(table_name, schema)
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
keys = []
if primary_key and primary_key.get('constrained_columns'):
primary_key['column_names'] = primary_key.pop('constrained_columns')
primary_key['type'] = 'pk'
keys += [primary_key]
for fk in foreign_keys:
fk['column_names'] = fk.pop('constrained_columns')
fk['type'] = 'fk'
keys += foreign_keys
for idx in indexes:
idx['type'] = 'index'
keys += indexes
for col in columns:
dtype = ''
try:
dtype = '{}'.format(col['type'])
            except Exception:
                # sqla.types.JSON __str__ has a bug, so use __class__ instead.
                dtype = col['type'].__class__.__name__
payload_columns.append({
'name': col['name'],
'type': dtype.split('(')[0] if '(' in dtype else dtype,
'longType': dtype,
'keys': [
k for k in keys
if col['name'] in k.get('column_names')
],
})
tbl = {
'name': table_name,
'columns': payload_columns,
'selectStar': mydb.select_star(
table_name, schema=schema, show_cols=True, indent=True,
cols=columns, latest_partition=True),
'primaryKey': primary_key,
'foreignKeys': foreign_keys,
'indexes': keys,
}
return json_success(json.dumps(tbl))
@has_access
@expose('/extra_table_metadata/<database_id>/<table_name>/<schema>/')
@log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(
mydb, table_name, schema)
return json_success(json.dumps(payload))
@has_access
@expose('/select_star/<database_id>/<table_name>')
@expose('/select_star/<database_id>/<table_name>/<schema>')
@log_this
def select_star(self, database_id, table_name, schema=None):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
return json_success(
mydb.select_star(
table_name,
schema,
latest_partition=True,
show_cols=True,
),
)
@expose('/theme/')
def theme(self):
return self.render_template('superset/theme.html')
@has_access_api
@expose('/cached_key/<key>/')
@log_this
def cached_key(self, key):
"""Returns a key from the cache"""
resp = cache.get(key)
if resp:
return resp
return 'nope'
@has_access_api
@expose('/cache_key_exist/<key>/')
@log_this
def cache_key_exist(self, key):
"""Returns if a key from cache exist"""
key_exist = True if cache.get(key) else False
status = 200 if key_exist else 404
return json_success(json.dumps({'key_exist': key_exist}),
status=status)
@has_access_api
@expose('/results/<key>/')
@log_this
def results(self, key):
"""Serves a key off of the results backend"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = utils.now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
'sqllab.query.results_backend_read',
utils.now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
'Data could not be retrieved. '
'You may want to re-run the query.',
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one()
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
return json_error_response(security_manager.get_table_access_error_msg(
'{}'.format(rejected_tables)), status=403)
payload = utils.zlib_decompress_to_string(blob)
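        # Optionally truncate the returned rows to DISPLAY_MAX_ROW.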
display_limit = app.config.get('DISPLAY_MAX_ROW', None)
if display_limit:
payload_json = json.loads(payload)
payload_json['data'] = payload_json['data'][:display_limit]
return json_success(
json.dumps(
payload_json,
default=utils.json_iso_dttm_ser,
ignore_nan=True,
),
)
@has_access_api
@expose('/stop_query/', methods=['POST'])
@log_this
def stop_query(self):
client_id = request.form.get('client_id')
try:
query = (
db.session.query(Query)
.filter_by(client_id=client_id).one()
)
query.status = utils.QueryStatus.STOPPED
db.session.commit()
except Exception:
pass
return self.json_response('OK')
@has_access_api
@expose('/sql_json/', methods=['POST', 'GET'])
@log_this
def sql_json(self):
"""Runs arbitrary sql and returns and json"""
async_ = request.form.get('runAsync') == 'true'
sql = request.form.get('sql')
database_id = request.form.get('database_id')
schema = request.form.get('schema') or None
template_params = json.loads(
request.form.get('templateParams') or '{}')
session = db.session()
mydb = session.query(models.Database).filter_by(id=database_id).first()
if not mydb:
            return json_error_response(
                'Database with id {} is missing.'.format(database_id))
rejected_tables = security_manager.rejected_datasources(sql, mydb, schema)
if rejected_tables:
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables),
link=security_manager.get_table_access_link(rejected_tables),
status=403)
session.commit()
select_as_cta = request.form.get('select_as_cta') == 'true'
tmp_table_name = request.form.get('tmp_table_name')
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = '{}.{}'.format(
mydb.force_ctas_schema,
tmp_table_name,
)
client_id = request.form.get('client_id') or utils.shortid()[:10]
query = Query(
database_id=int(database_id),
limit=mydb.db_engine_spec.get_limit_from_sql(sql),
sql=sql,
schema=schema,
select_as_cta=request.form.get('select_as_cta') == 'true',
start_time=utils.now_as_float(),
tab_name=request.form.get('tab'),
status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING,
sql_editor_id=request.form.get('sql_editor_id'),
tmp_table_name=tmp_table_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
if not query_id:
raise Exception(_('Query record was not created as expected.'))
logging.info('Triggering query_id: {}'.format(query_id))
try:
template_processor = get_template_processor(
database=query.database, query=query)
rendered_query = template_processor.process_template(
query.sql,
**template_params)
except Exception as e:
return json_error_response(
'Template rendering failed: {}'.format(utils.error_msg_from_exception(e)))
# Async request.
if async_:
logging.info('Running query on a Celery worker')
            # Ignore the celery future object, or the request may time out waiting on it.
try:
sql_lab.get_sql_results.delay(
query_id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username if g.user else None,
start_time=utils.now_as_float())
except Exception as e:
logging.exception(e)
msg = (
'Failed to start remote query on a worker. '
'Tell your administrator to verify the availability of '
'the message queue.'
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response('{}'.format(msg))
resp = json_success(json.dumps(
{'query': query.to_dict()}, default=utils.json_int_dttm_ser,
ignore_nan=True), status=202)
session.commit()
return resp
# Sync request.
try:
timeout = config.get('SQLLAB_TIMEOUT')
timeout_msg = (
'The query exceeded the {timeout} seconds '
'timeout.').format(**locals())
with utils.timeout(seconds=timeout,
error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query_id,
rendered_query,
return_results=True,
user_name=g.user.username if g.user else None)
payload = json.dumps(
data,
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logging.exception(e)
return json_error_response('{}'.format(e))
if data.get('status') == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access
@expose('/csv/<client_id>')
@log_this
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
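        # Prefer cached results from the results backend; fall back to
        # re-running the query when no cached blob is available.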
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = (
'attachment; filename={}.csv'.format(unidecode(query.name)))
logging.info('Ready to return response')
return response
@has_access
@expose('/fetch_datasource_metadata')
@log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = (
request.args.get('datasourceKey').split('__'))
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
# Check if datasource exists
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
# Check permission for datasource
if not security_manager.datasource_access(datasource):
return json_error_response(
security_manager.get_datasource_access_error_msg(datasource),
link=security_manager.get_datasource_access_link(datasource))
return json_success(json.dumps(datasource.data))
@expose('/queries/<last_updated_ms>')
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
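        # Queries still PENDING/RUNNING past SQLLAB_ASYNC_TIME_LIMIT_SEC are
        # flagged as timed out below.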
unfinished_states = [
utils.QueryStatus.PENDING,
utils.QueryStatus.RUNNING,
]
queries_to_timeout = [
client_id for client_id, query_dict in dict_queries.items()
if (
query_dict['state'] in unfinished_states and (
now - query_dict['startDttm'] >
config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000
)
)
]
        if queries_to_timeout:
            db.session.execute(
                update(Query).where(
                    and_(
                        Query.user_id == g.user.get_id(),
                        Query.client_id.in_(queries_to_timeout),
                    ),
                ).values(status=utils.QueryStatus.TIMED_OUT),
            )
            db.session.commit()
for client_id in queries_to_timeout:
dict_queries[client_id]['status'] = utils.QueryStatus.TIMED_OUT
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
@expose('/search_queries')
@log_this
def search_queries(self):
"""Search for queries."""
query = db.session.query(Query)
search_user_id = request.args.get('user_id')
database_id = request.args.get('database_id')
search_text = request.args.get('search_text')
status = request.args.get('status')
        # 'from' and 'to' timestamps should be Epoch timestamps in seconds
        from_time = request.args.get('from')
        to_time = request.args.get('to')
        if search_user_id:
            # Filter on user id
            query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query \
.filter(Query.sql.like('%{}%'.format(search_text)))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config.get('QUERY_SEARCH_LIMIT', 1000)
sql_queries = (
query.order_by(Query.start_time.asc())
.limit(query_limit)
.all()
)
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype='application/json')
@app.errorhandler(500)
def show_traceback(self):
return render_template(
'superset/traceback.html',
error_msg=get_error_msg(),
), 500
@expose('/welcome')
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session
.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
'user': bootstrap_user_data(),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='welcome',
title='Superset',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
@has_access
@expose('/profile/<username>/')
def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
payload = {
'user': bootstrap_user_data(username, include_perms=True),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
title=_("%(user)s's profile", user=username),
entry='profile',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
@has_access
@expose('/sqllab')
def sqllab(self):
"""SQL Editor"""
d = {
'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='sqllab',
bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),
)
@api
@has_access_api
@expose('/slice_query/<slice_id>/')
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = self.get_viz(slice_id)
if not security_manager.datasource_access(viz_obj.datasource):
return json_error_response(
security_manager.get_datasource_access_error_msg(viz_obj.datasource),
status=401,
link=security_manager.get_datasource_access_link(viz_obj.datasource))
return self.get_query_string_response(viz_obj)
@api
@has_access_api
@expose('/schema_access_for_csv_upload')
def schemas_access_for_csv_upload(self):
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get('db_id'):
return json_error_response(
'No database is allowed for your csv upload')
db_id = int(request.args.get('db_id'))
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False)
return self.json_response(schemas_allowed_processed)
except Exception:
return json_error_response((
'Failed to fetch schemas allowed for csv upload in this database! '
'Please contact Superset Admin!\n\n'
'The error message returned was:\n{}').format(traceback.format_exc()))
appbuilder.add_view_no_menu(Superset)
class CssTemplateModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(models.CssTemplate)
list_title = _('List Css Template')
show_title = _('Show Css Template')
add_title = _('Add Css Template')
edit_title = _('Edit Css Template')
list_columns = ['template_name']
edit_columns = ['template_name', 'css']
add_columns = edit_columns
label_columns = {
'template_name': _('Template Name'),
}
class CssTemplateAsyncModelView(CssTemplateModelView):
list_columns = ['template_name', 'css']
appbuilder.add_separator('Sources')
appbuilder.add_view(
CssTemplateModelView,
'CSS Templates',
label=__('CSS Templates'),
icon='fa-css3',
category='Manage',
category_label=__('Manage'),
category_icon='')
appbuilder.add_view_no_menu(CssTemplateAsyncModelView)
appbuilder.add_link(
'SQL Editor',
label=_('SQL Editor'),
href='/superset/sqllab',
category_icon='fa-flask',
icon='fa-flask',
category='SQL Lab',
category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Query Search',
label=_('Query Search'),
href='/superset/sqllab#search',
icon='fa-search',
category_icon='fa-flask',
category='SQL Lab',
category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Upload a CSV',
label=__('Upload a CSV'),
href='/csvtodatabaseview/form',
icon='fa-upload',
category='Sources',
category_label=__('Sources'),
category_icon='fa-wrench')
appbuilder.add_separator('Sources')
@app.after_request
def apply_caching(response):
"""Applies the configuration's http headers to all responses"""
for k, v in config.get('HTTP_HEADERS').items():
response.headers[k] = v
return response
# ---------------------------------------------------------------------
# Redirecting URL from previous names
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
@app.route('/<regex("panoramix\/.*"):url>')
def panoramix(url): # noqa
return redirect(request.full_path.replace('panoramix', 'superset'))
@app.route('/<regex("caravel\/.*"):url>')
def caravel(url): # noqa
return redirect(request.full_path.replace('caravel', 'superset'))
# ---------------------------------------------------------------------
class SupersetCasAuthDBView(AuthDBView):
login_template = 'appbuilder/general/security/login_cas.html'
@expose('/hna_iam_authorize', methods=['GET'])
def cas_authorized(self):
if g.user is not None and g.user.is_authenticated:
return redirect(self.appbuilder.get_url_for_index)
        return redirect(app.config['IAM_LOGIN_VALID_URL'] + "?service=" + app.config['SUPERSET_CAS_CALL_URL'] + "&params=")
def add_role_if_missing(self, sm, user_id, role_name):
found_role = sm.find_role(role_name)
session = sm.get_session
user = session.query(sm.user_model).get(user_id)
if found_role and found_role not in user.roles:
user.roles += [found_role]
session.commit()
@expose('/callback', methods=['GET'])
def cas_callback(self):
if 'ticket' not in request.args:
flash("Invalid ticket param in callback")
return redirect(self.appbuilder.get_url_for_login)
ticket = request.args.get('ticket')
validateUrl = "%s?service=%s&ticket=%s&format=json" % (app.config['IAM_VALID_URL'], app.config['SUPERSET_CAS_CALL_URL'], ticket)
import requests
res = requests.get(validateUrl)
        if res.status_code != 200:
            flash("request iam validate failure in callback")
            return redirect(self.appbuilder.get_url_for_login)
        user_info = res.content.decode()
        user_info_json = json.loads(user_info)
        if 'authenticationSuccess' in user_info_json['serviceResponse']:
            success_res = user_info_json['serviceResponse']['authenticationSuccess']
            username = success_res.get('user')
            email = success_res['attributes'].get('email')
sm = self.appbuilder.sm
user = sm.find_user(username)
role = sm.find_role(app.config['CUSTOM_ROLE_NAME_KEYWORD'])
if user is None and username:
user = sm.add_user(
username=username,
first_name=username,
last_name='',
email=email,
role=role
)
msg = ("Welcome to Superset, {}".format(username))
flash(msg, 'info')
user = sm.auth_user_remote_user(username)
self.add_role_if_missing(sm, user.id, app.config['CUSTOM_ROLE_NAME_KEYWORD'])
login_user(user)
return redirect(self.redirect_url())
else:
flash("Error :%s " % user_info_json['serviceResponse']['authenticationFailure']['description'])
return redirect(self.appbuilder.get_url_for_login)
class VistorRegModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(models.VisitorReg)
    list_title = _('List Visitors')
    show_title = _('Show Visitor')
    add_title = _('Add Visitor')
    edit_title = _('Edit Visitor')
list_columns = [
'jbh_uid', 'name', 'phone', 'group_prop',
'registry_type', 'first_vistor_time', 'first_receptor', 'communication_times', 'agree', 'status'
]
add_columns = [
'jbh_uid', 'name', 'phone', 'group_prop',
'registry_type', 'illustration', 'first_vistor_time', 'first_receptor', 'communication_times', 'agree', 'status'
]
label_columns = {
'jbh_uid': _('聚宝汇UID'),
'name': _('姓名'),
'phone': _('电话'),
'group_prop': _('集团属性'),
'registry_type': _('登记类型'),
'first_vistor_time': _('首次来访时间'),
'first_receptor': _('首次接待人员'),
'communication_times': _('沟通次数'),
'agree': _('客户是否同意'),
'status': _('状态'),
'illustration': _('客户诉求'),
}
appbuilder.add_view(
VistorRegModelView,
    'Visitor Registration',
label=__('访客登记'),
icon='fa-registered',
category='Disposal process',
category_label=__('处置流程'),
category_icon='fa-hand-lizard-o')
appbuilder.add_separator('Disposal process')
appbuilder.add_link(
'Investor Communication',
label=__('投资人沟通'),
href='/csvtodatabaseview/form',
icon='fa-odnoklassniki',
category='Disposal process',
category_label=__('处置流程'),
category_icon='fa-hand-lizard-o')
appbuilder.add_separator('Disposal process')
appbuilder.add_link(
'Cash Plan',
label=__('兑付计划'),
href='/csvtodatabaseview/form',
icon='fa-odnoklassniki',
category='Disposal process',
category_label=__('处置流程'),
category_icon='fa-hand-lizard-o')
|
the-stack_0_11581 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, \
    get_object_or_404  # redirect can send the user to a URL, in this case person_list
# get_object_or_404 fetches the object, returning a 404 if it cannot be found
from .models import Pessoa
from .forms import PersonForm
@login_required
def person_list(request):
    persons = Pessoa.objects.all()  # like a SELECT * FROM Pessoa, i.e. fetches every person
return render(request, 'person.html', {"galeres": persons})
@login_required
def person_new(request):
form = PersonForm(request.POST or None,
                      request.FILES or None)  # request.FILES captures the uploaded media files
if form.is_valid():
form.save()
        return redirect('person_list')  # after saving, redirect to the person list
return render(request, 'person_form.html', {'form': form})
@login_required
def person_update(request, id):
    pessoa = get_object_or_404(Pessoa, pk=id)  # pk looks the person up by its id (the primary key in the database); Pessoa is defined in models.py
    form = PersonForm(request.POST or None, request.FILES or None, instance=pessoa)  # PersonForm is defined in forms.py
if form.is_valid():
form.save()
return redirect('person_list')
return render(request, 'person_form.html', {'form': form})
@login_required
def person_delete(request, id):
pessoa = get_object_or_404(Pessoa, pk=id)
    form = PersonForm(request.POST or None, request.FILES or None, instance=pessoa)  # the form is used here so that clicking delete first shows the person's form;
    # if that is not needed, the page could simply show the delete button
    if request.method == 'POST':  # True if the request uses HTTP POST, otherwise False
pessoa.delete()
return redirect('person_list')
return render(request, 'person_delete_confirm.html', {'form': form})
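# Example URL wiring for these views (illustrative sketch; this would live in
# the app's urls.py, which is not part of this file):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.person_list, name='person_list'),
#         path('new/', views.person_new, name='person_new'),
#         path('update/<int:id>/', views.person_update, name='person_update'),
#         path('delete/<int:id>/', views.person_delete, name='person_delete'),
#     ]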
|
the-stack_0_11584 | # Copyright 2016 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from vitrageclient.common import yaml_utils
from vitrageclient import exceptions as exc
class Template(object):
url = 'v1/template/'
def __init__(self, api):
self.api = api
def list(self):
"""Get templates list"""
return self.api.get(self.url).json()
def versions(self):
"""Get templates versions"""
return self.api.get(self.url + 'versions').json()
def show(self, _id):
"""Show template content"""
url = self.url + _id
return self.api.get(url).json()
def add(self, path=None, template_type=None,
params=None, template_str=None, overwrite=False):
"""Add a new template
:param path: (optional) The template file path or templates dir path
:param template_type: (optional) The template type, in case it is not
written inside the template metadata section
:param params: (optional) Actual values for the template parameters
        :param template_str: (optional) A string representation of the
            template yaml
        :param overwrite: (optional) overwrite the template if exists
        Either path or template_str must exist (but not both)
:return:
"""
files_content = \
self._load_template(path=path, template_str=template_str)
api_params = dict(templates=files_content,
template_type=template_type,
params=params, overwrite=overwrite)
return self.api.put(self.url, json=api_params).json()
def delete(self, ids):
"""Delete existing"""
params = dict(id=ids)
return self.api.delete(self.url, json=params).json()
def validate(self, path=None, template_type=None,
params=None, template_str=None):
"""Template validation
Make sure that the template file is correct in terms of syntax
and content.
It is possible to pass a specific file path in order to validate one
template, or directory path for validation of several templates (the
directory must contain only templates)
:param path: (optional) The template file path or templates dir path
:param template_type: (optional) The template type, in case it is not
written inside the template metadata section
:param params: (optional) Actual values for the template parameters
        :param template_str: (optional) A string representation of the
            template yaml
Either path or template_str must exist (but not both)
:return:
"""
files_content = \
self._load_template(path=path, template_str=template_str)
api_params = dict(templates=files_content,
template_type=template_type,
params=params)
return self.api.post(self.url, json=api_params).json()
@classmethod
def _load_yaml_files(cls, path):
if os.path.isdir(path):
files_content = []
for file_name in os.listdir(path):
file_path = '%s/%s' % (path, file_name)
if os.path.isfile(file_path):
template = cls._load_yaml_file(file_path)
files_content.append((file_path, template))
else:
files_content = [(path, cls._load_yaml_file(path))]
return files_content
@classmethod
def _load_yaml_file(cls, path):
with open(path, 'r') as stream:
return cls._load_yaml(stream)
@classmethod
def _load_yaml(cls, yaml_content):
try:
return yaml_utils.load(yaml_content)
except ValueError as e:
message = 'Could not load template: %s. Reason: %s' \
% (yaml_content, e)
raise exc.CommandError(message)
@classmethod
def _load_template(cls, path, template_str):
if path:
files_content = cls._load_yaml_files(path)
elif template_str:
files_content = [(None, cls._load_yaml(template_str))]
else:
raise exc.CommandError(
'Add template API must be called with either \'path\' or '
'\'template_str\'')
return files_content
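# A minimal usage sketch (illustrative; assumes an authenticated low-level
# vitrageclient ``api`` session object and an existing template file path):
#
#     templates = Template(api)
#     templates.list()
#     templates.validate(path='/etc/vitrage/templates/my_template.yaml')
#     templates.add(path='/etc/vitrage/templates/my_template.yaml',
#                   overwrite=True)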
|
the-stack_0_11588 | import copy
import random
import time
from functools import partial
from sklearn.utils import shuffle
import numpy as np
from sklearn import linear_model
from dnl import Sampling_Methods, Solver
from dnl.PredictPlustOptimizeUtils import compute_C_k
from dnl.Solver import get_optimization_objective
from dnl.Utils import TransitionPoint, get_mini_batches
from operator import attrgetter
import multiprocessing as mp
CONV_CONST = 10E-6
MEDIAN_LOSS = 'MEDIAN'
MEAN_LOSS = 'MEAN LOSS'
class PredictPlusOptModel:
def __init__(self, alphas=None, const=None, opt_params=None, loss=MEDIAN_LOSS, max_step_size_magnitude=1,
min_step_size_magnitude=-1,
step_size_divider=10, sampling_method=Sampling_Methods.DIVIDE_AND_CONQUER,
is_parallel=True, learning_rate=0.1, mini_batch_size=32, beta=0, epoch_limit=3, run_time_limit=100000,
verbose=False, is_Val = True):
"""
:param alphas: model parameters
:param const: model constant
:param capacities: capacity of the optimization problem
:param max_step_size_magnitude: sample space upper bound
:param min_step_size_magnitude: sample space lower step size
:param step_size_divider:
:param sampling_method:
"""
self.alphas = alphas
self.const = const
self.opt_params = opt_params
self.is_val = is_Val
self.step_size_divider = step_size_divider
self.is_parallel = is_parallel
if mini_batch_size == -1:
self.learning_rate = 1
else:
self.learning_rate = learning_rate
self.mini_batch_size = mini_batch_size
self.epoch_limit = epoch_limit
self.run_time_limit = run_time_limit
self.training_obj_value = []
self.test_regrets = []
self.val_regrets = []
self.epochs = []
self.sub_epochs = []
self.run_time = []
self.max_step_size_magnitude = max_step_size_magnitude
self.min_step_size_magnitude = min_step_size_magnitude
self.sampling_method = sampling_method
self.test_MSE = 0
self.loss = loss
self.number_of_epochs = 0
self.test_regret = 0
self.training_MSE = 0
self.test_run_time = 0
self.beta = beta
self.verbose = verbose
def init_params_lin_regression(self, X,Y):
"""
initialize the model with linear regression
        :param X: feature matrix
        :param Y: target values
:return:
"""
params = initialize_parameters(X,Y)
self.__setattr__('alphas', params.get('alphas'))
self.__setattr__('const', params.get('const'))
self.__setattr__('capacities', params.get('capacities'))
def coordinate_descent(self, train_X, train_Y, train_weights, val_X, val_Y, val_weights, print_test=False, test_X=None, test_Y=None,
test_weights=None, core_number=7):
"""
Uses coordinate descent to optimize parameters
:param train_X: test set features
:param train_Y: test set output
:param train_weights:
:return: profit: average profit of the training set
"""
# self_decided_features = [4, 5, 6, 7]
# self_decided_features = range(8)
# self_decided_features = [4]
is_break = False
self_decided_features = list(range(len(self.alphas)))
prev_profit = -10
model_params = {'alphas': self.alphas,
'const': self.const}
profit = np.median(get_optimization_objective(X=train_X, Y=train_Y, weights=train_weights,
opt_params=self.opt_params, model_params=model_params))
test_regret = np.median(self.get_regret(test_X, test_Y, test_weights))
val_regret = np.median(self.get_regret(val_X, val_Y, val_weights))
self.test_regrets.append(test_regret)
self.training_obj_value.append(profit)
self.run_time.append(0)
self.epochs.append(0)
self.sub_epochs.append(0)
self.val_regrets.append(val_regret)
start_time = time.time()
print("original objective value: " + str(profit))
EPOCH = 0
direction = np.zeros(len(self_decided_features))
momentum = np.zeros(len(self_decided_features))
sampler = Sampling_Methods.Sampler(max_step_size_magnitude=self.max_step_size_magnitude,
min_step_size_magnitude=self.min_step_size_magnitude,
step_size_divider=self.step_size_divider,
sampling_method=self.sampling_method,
opt_params=self.opt_params)
if self.is_parallel:
mypool = mp.Pool(processes=min(8, core_number))
else:
mypool = None
print("------------------------")
train_X_og = train_X
train_Y_og = train_Y
train_weights_og = train_weights
if self.mini_batch_size == -1:
mini_batch_size = len(train_Y)
else:
mini_batch_size = self.mini_batch_size
mini_batches_X, mini_batches_Y, mini_batches_weights = get_mini_batches(X=train_X, Y=train_Y,
weights=train_weights,
size=mini_batch_size)
sub_epoch = 0
while (EPOCH < self.epoch_limit) and self.run_time[-1] < self.run_time_limit and not converge(profit, prev_profit, CONV_CONST, self.mini_batch_size):
mini_batches_X, mini_batches_Y, mini_batches_weights = shuffle(mini_batches_X, mini_batches_Y,
mini_batches_weights)
for mini_batch_X, mini_batch_Y, mini_batch_weights in zip(mini_batches_X, mini_batches_Y,
mini_batches_weights):
train_X = mini_batch_X
train_Y = mini_batch_Y
train_weights = mini_batch_weights
profit = np.median(get_optimization_objective(X=train_X, Y=train_Y, weights=train_weights,
opt_params=self.opt_params, model_params=model_params))
# cut for minibatch
prev_profit = profit
print("-----------------------")
                # use for random sampling of parameters
# for k in random.sample(range(len(self.alphas)), len(self.alphas) - 1):
# for k in range(len(self.alphas)):
random.seed(time.time())
random.shuffle(self_decided_features)
for k in self_decided_features:
model_params = {'alphas': self.alphas,
'const': self.const}
current_alpha = self.alphas[k, 0]
best_transition_points_set = set()
if self.is_parallel:
map_func = partial(get_and_clean_transition_points, sampler=sampler, model_params=model_params,
k=k,
opt_params=self.opt_params,
current_alpha=current_alpha)
iter = [[benchmark_X, benchmark_Y, benchmark_weights] for
benchmark_X, benchmark_Y, benchmark_weights in
zip(train_X, train_Y, train_weights)]
best_transition_points_set = mypool.starmap(map_func, iter)
best_transition_points_set = set().union(*best_transition_points_set)
benchmark_best_transition_point = find_the_best_transition_point_benchmarks(train_X, train_Y,
k=k,
model_params=model_params,
train_weights=train_weights,
opt_params=self.opt_params,
transition_point_list=list(
best_transition_points_set),
prev_profit=profit,
pool=mypool)
else:
for benchmark_X, benchmark_Y, benchmark_weights in zip(train_X, train_Y, train_weights):
best_transition_point, __, predicted_regrets, regrets, plot_x = sampler.get_transition_points(
model_params=model_params, k=k,
train_X=benchmark_X,
train_Y=benchmark_Y,
train_weights=benchmark_weights)
best_transition_point_set_benchmark = clean_transition_points(
transition_points=best_transition_point[-1],
benchmark_X=benchmark_X,
benchmark_Y=benchmark_Y, weights=benchmark_weights,
model_params=model_params, opt_params=self.opt_params,
current_alpha=current_alpha)
best_transition_points_set = best_transition_points_set.union(
best_transition_point_set_benchmark)
# To reduce training time move this process to the sampling method so we dont iterate through transition points list twice
benchmark_best_transition_point = find_the_best_transition_point_benchmarks(train_X, train_Y,
k=k,
model_params=model_params,
train_weights=train_weights,
opt_params=self.opt_params,
transition_point_list=list(
best_transition_points_set),
prev_profit=profit)
gradient = benchmark_best_transition_point.x - self.alphas[k, 0]
# print((
# benchmark_best_transition_point.x - self.alphas[k, 0]))
# print('dir', direction[k])
# if abs(gradient) > 0:
# gradient = gradient / abs(gradient)
direction[k] = -self.beta * momentum[k] - (1 - self.beta) * gradient
# print(momentum, gradient, direction)
# print('mom: ', momentum[k], 'dir: ', direction[k])
self.alphas[k, 0] = self.alphas[k, 0] - direction[k] * self.learning_rate
momentum[k] = direction[k] * 1
profit = benchmark_best_transition_point.true_profit
                    # record data for each parameter update if it's a full batch
if self.mini_batch_size == -1:
if self.is_val:
# print('val')
val_regret = np.median(self.get_regret(val_X, val_Y, val_weights, pool=mypool))
self.val_regrets.append(val_regret)
test_run_time = time.time()
if print_test:
# print('test')
test_regret = np.median(self.get_regret(test_X, test_Y, test_weights, pool=mypool))
self.test_regrets.append(test_regret)
train_regret = np.median(self.get_regret(train_X, train_Y, train_weights, pool=mypool))
self.training_obj_value.append(train_regret)
if self.verbose:
print('updating parameter', k, 'test regret', test_regret)
print("Updating Parameter: " + str(k) + " profit: " + str(profit))
self.test_run_time = self.test_run_time + time.time() - test_run_time
sub_epoch = sub_epoch + 1
self.sub_epochs.append(sub_epoch)
self.epochs.append(EPOCH)
self.run_time.append((time.time() - start_time - self.test_run_time))
print("EPOCH:", EPOCH, "sub epoch:", sub_epoch, "objective value:", profit, 'val regret',
self.val_regrets[-1], 'test regret', self.test_regrets[-1], flush=True)
if not self.mini_batch_size == -1:
# Record data after each batch for mini batches
if self.is_val:
# print('val')
val_regret = np.median(self.get_regret(val_X,val_Y,val_weights,pool=mypool))
self.val_regrets.append(val_regret)
test_run_time = time.time()
if (print_test):
# print('test')
test_regret = np.median(self.get_regret(test_X, test_Y, test_weights,pool=mypool))
self.test_regrets.append(test_regret)
train_regret = np.median(self.get_regret(train_X, train_Y, train_weights,pool=mypool))
self.training_obj_value.append(train_regret)
if self.verbose:
print('updating parameter', k, 'test regret', test_regret)
print("Updating Parameter: " + str(k) + " profit: " + str(profit))
self.test_run_time = self.test_run_time + time.time() - test_run_time
sub_epoch = sub_epoch + 1
self.sub_epochs.append(sub_epoch)
self.epochs.append(EPOCH)
self.run_time.append((time.time() - start_time - self.test_run_time))
print("EPOCH:", EPOCH, "sub epoch:", sub_epoch, "objective value:", profit, 'val regret', self.val_regrets[-1],'test regret', self.test_regrets[-1])
if self.run_time[-1] > self.run_time_limit:
is_break = True
break
if is_break:
break
EPOCH = EPOCH + 1
self.number_of_epochs = EPOCH
print("EPOCH:", EPOCH, "objective value:", profit, 'val regret', self.val_regrets[-1], 'test regret', self.test_regrets[-1])
print('Training finished ')
print("-----------------------")
if self.is_parallel:
mypool.close()
return profit
def get_regret(self, X, Y, weights=None, pool=None):
model_params = {'alphas': self.alphas,
'const': self.const}
if pool is None:
# print('X shape', X[0].shape)
#
# print('y shape', len(Y))
average_objective_value_with_predicted_items = get_optimization_objective(X=X, Y=Y, weights=weights,
opt_params=self.opt_params,
model_params=model_params
)
optimal_average_objective_value = Solver.get_optimal_average_objective_value(X=X, Y=Y, weights=weights,
opt_params=self.opt_params,
)
# print('regret predicted item value set',average_objective_value_with_predicted_items,'regret with real item value',optimal_average_objective_value)
# print('pred obj', np.sum(average_objective_value_with_predicted_items))
# print('true obj', np.sum(optimal_average_objective_value))
# print(optimal_average_objective_value - average_objective_value_with_predicted_items)
# print('true obj', np.sum(optimal_average_objective_value))
regret = np.median(optimal_average_objective_value - average_objective_value_with_predicted_items)
# print('regret', regret)
# print(regret)
else:
map_func = partial(get_regret_worker, model_params=model_params, opt_params=self.opt_params)
iter = zip(X, Y, weights)
# [[x, y] for x, y in zip([4, 1, 0], [5, 1, 1])]
objective_values = pool.starmap(map_func, iter)
objective_values_predicted_items, optimal_objective_values = zip(*objective_values)
# print('optimal_average_objective_value', objective_values_predicted_items)
# print('average_objective_value_with_predicted_items', optimal_objective_values)
print(np.mean(np.concatenate(optimal_objective_values)))
regret = np.median(np.concatenate(optimal_objective_values) - np.concatenate(objective_values_predicted_items))
# print('true obj',np.sum(np.concatenate(optimal_objective_values)))
self.test_regret = regret
return regret
def get_MSE(self, X, Y):
predicted_values = compute_C_k(X.T, self.alphas, self.const, isSampling=False)
MSE = np.mean((Y - predicted_values) ** 2)
self.test_MSE = MSE
return MSE
def print(self):
first_line = ['Method', 'Max Step Size Order', 'Min Step Size Order', 'Run Time Limit', 'Epoch Limit',
'Mini Batch Size', 'Learning rate', 'Parallelism', 'Test MSE']
second_line = [self.sampling_method, self.max_step_size_magnitude, self.min_step_size_magnitude,
self.run_time_limit, self.epoch_limit, self.mini_batch_size, self.learning_rate,
self.is_parallel, self.test_MSE]
third_line = ['epochs', 'sub epochs', 'run time', 'training objective', 'test regret', 'val regret']
rest = np.array(
[self.epochs, self.sub_epochs, self.run_time, self.training_obj_value, self.test_regrets, self.val_regrets]).T.tolist()
print = []
print.append(first_line)
print.append(second_line)
print.append(third_line)
print.extend(rest)
return print
def get_file_name(self, file_type='.csv'):
file_name = str(self.sampling_method) + '-' + str(self.max_step_size_magnitude) + str(
self.min_step_size_magnitude) + file_type
return file_name
def get_and_clean_transition_points(benchmark_X, benchmark_Y, benchmark_weights, sampler, model_params, k, opt_params,
current_alpha):
best_transition_point, __, predicted_regrets, regrets, plot_x = sampler.get_transition_points(
model_params=model_params, k=k,
train_X=benchmark_X,
train_Y=benchmark_Y,
train_weights=benchmark_weights)
best_transition_point_set_benchmark = clean_transition_points(
transition_points=best_transition_point[-1],
benchmark_X=benchmark_X,
benchmark_Y=benchmark_Y, weights=benchmark_weights,
model_params=model_params, opt_params=opt_params,
current_alpha=current_alpha)
return best_transition_point_set_benchmark
def find_the_best_transition_point_benchmarks(train_X, train_Y, model_params, transition_point_list,
opt_params,
train_weights, prev_profit, k, pool=None):
alphas = model_params.get('alphas')
best_average_profit = prev_profit
best_transition_point = TransitionPoint(alphas[k, 0], true_profit=prev_profit)
if not (len(transition_point_list) == 1 and alphas[k, 0] == transition_point_list[0]):
if pool is not None:
map_func = partial(find_the_best_transition_point_benchmarks_worker, train_X=train_X, train_Y=train_Y,
train_weights=train_weights, model_params=model_params, opt_params=opt_params, k=k)
results = pool.map(map_func, transition_point_list)
results.append(best_transition_point)
# print('x', [transition_point.x for transition_point in results], ' objective_value' ,
# [transition_point.true_profit for transition_point in results])
best_transition_point = max(results, key=attrgetter('true_profit'))
else:
for transition_point_x in transition_point_list:
transition_point = find_the_best_transition_point_benchmarks_worker(transition_point_x, train_X=train_X,
train_Y=train_Y,
train_weights=train_weights,
model_params=model_params,
opt_params=opt_params, k=k)
if transition_point.true_profit > best_average_profit:
best_average_profit = transition_point.true_profit
best_transition_point = copy.deepcopy(transition_point)
return best_transition_point
def find_the_best_transition_point_benchmarks_worker(transition_point_x, train_X, train_Y, train_weights, model_params,
opt_params, k):
alphas = model_params.get('alphas')
alphas[k, 0] = transition_point_x
model_params['alphas'] = alphas
average_profit = np.median(get_optimization_objective(X=train_X, Y=train_Y,
weights=train_weights, opt_params=opt_params,
model_params=model_params))
# print('k: ' + str(k) + ' transition_point: ' + str(transition_point_x) + ' profit: ' + str(average_profit))
return TransitionPoint(transition_point_x, true_profit=average_profit)
def get_regret_worker(X, Y, weights, model_params, opt_params ):
# print('im in worker')
# print('X shape', X.shape)
# print('y shape', Y.shape)
# print('weights shape', weights.shape)
average_objective_value_with_predicted_items = get_optimization_objective(X=[X], Y=[Y], weights=[weights],
opt_params=opt_params,
model_params=model_params
)
# print('finished average_objective_value_with_predicted_items')
optimal_average_objective_value = Solver.get_optimal_average_objective_value(X=[X], Y=[Y], weights=[weights],
opt_params=opt_params,
)
# print('finished working')
return average_objective_value_with_predicted_items, optimal_average_objective_value
def converge(profit, prev_profit, conv_const, flag):
"""
    A method to determine whether the algorithm has reached the convergence point.
    :param profit: current objective value
    :param prev_profit: previous objective value
    :param conv_const: convergence limit
    :param flag: mini-batch size; convergence is only checked for full-batch runs (flag <= 0)
    :return: is_converged : boolean
"""
if flag > 0:
return False
else:
print('prev profit', prev_profit, 'profit' , profit)
print('ratio', abs((profit - prev_profit) / profit))
if abs((profit - prev_profit) / profit) < conv_const:
is_converged = True
else:
is_converged = False
return is_converged
def initialize_parameters(X,Y):
"""
initialize the parameters of the predict-opt model, AKA first stage
    :param X: feature matrix
    :param Y: target values
:return: params: dictionary, has initialized parameters of the model.
"""
model = linear_model.Ridge().fit(X, Y)
coef = model.coef_
const = model.intercept_
params = {'alphas': coef.T,
'const': const}
return params
def clean_transition_points(transition_points, benchmark_X, benchmark_Y, weights, opt_params, model_params,
current_alpha):
cleaner_transition_points = set()
base_profit = np.median(Solver.get_optimization_objective(X=[benchmark_X], Y=[benchmark_Y], weights=weights,
model_params=model_params, opt_params=opt_params))
for transition_point in transition_points:
if transition_point.true_profit > base_profit:
cleaner_transition_points.add(transition_point.x)
if not cleaner_transition_points:
cleaner_transition_points.add(float(current_alpha))
return cleaner_transition_points
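# A minimal usage sketch (illustrative only; assumes per-benchmark feature,
# target and weight arrays plus an ``opt_params`` dict understood by
# dnl.Solver -- the variable names below are placeholders, not part of this
# module):
#
#     model = PredictPlusOptModel(opt_params=opt_params, mini_batch_size=32)
#     model.init_params_lin_regression(train_X_flat, train_Y_flat)
#     model.coordinate_descent(train_X, train_Y, train_weights,
#                              val_X, val_Y, val_weights,
#                              print_test=True, test_X=test_X, test_Y=test_Y,
#                              test_weights=test_weights)
#     print(model.get_regret(test_X, test_Y, test_weights))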
|
the-stack_0_11589 | '''
Copyright Vulcan Inc. 2018-2020
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
'''
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="gcsutils",
version="1.1.5",
description="Utility functions for Google Cloud Storage",
long_description=long_description,
long_description_content_type="text/markdown",
packages=['gcsutils'],
install_requires=['google-cloud-storage==1.16.1'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
url='https://github.com/CoralMapping/proc_gcs_utils'
)
|
the-stack_0_11591 | # postgresql/psycopg2.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but
are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the :paramref:`.Connection.execution_options.stream_results`
execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
.. seealso::
:ref:`psycopg2_disable_native_unicode`
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect.
.. seealso::
:ref:`psycopg2_isolation_level`
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
using psycopg2's ``set_client_encoding()`` method.
.. seealso::
:ref:`psycopg2_unicode`
* ``executemany_mode``, ``executemany_batch_page_size``,
``executemany_values_page_size``: Allows use of psycopg2
  extensions for optimizing "executemany"-style queries. See the referenced
section below for details.
.. seealso::
:ref:`psycopg2_executemany_mode`
* ``use_batch_mode``: this is the previous setting used to affect "executemany"
mode and is now deprecated.
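For illustration only (the DSN below is a placeholder), several of the
keyword arguments above can be combined in a single :func:`.create_engine`
call::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/mydatabase",
        server_side_cursors=True,
        use_native_unicode=True,
        client_encoding='utf8',
        isolation_level='READ COMMITTED',
    )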
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
.. seealso::
`PQconnectdbParams \
<http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Empty DSN Connections / Environment Variable Connections
---------------------------------------------------------
The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
libpq client library, which by default indicates to connect to a localhost
PostgreSQL database that is open for "trust" connections. This behavior can be
further tailored using a particular set of environment variables which are
prefixed with ``PG_...``, which are consumed by ``libpq`` to take the place of
any or all elements of the connection string.
For this form, the URL can be passed without any elements other than the
initial scheme::
engine = create_engine('postgresql+psycopg2://')
In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
function which in turn represents an empty DSN passed to libpq.
.. versionadded:: 1.3.2 support for parameter-less connections with psycopg2.
.. seealso::
`Environment Variables\
<https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
PostgreSQL documentation on how to use ``PG_...``
environment variables for connections.
.. _psycopg2_execution_options:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* ``isolation_level`` - Set the transaction isolation level for the lifespan
of a :class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
* ``stream_results`` - Enable or disable usage of psycopg2 server side
cursors - this feature makes use of "named" cursors in combination with
special result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
* ``max_row_buffer`` - when using ``stream_results``, an integer value that
specifies the maximum number of rows to buffer at a time. This is
interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
buffer will grow to ultimately store 1000 rows at a time.
.. versionadded:: 1.0.6
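As a brief sketch (the table name is illustrative and ``process`` stands in
for application code), streaming with a bounded row buffer can be requested
per execution::
    with engine.connect() as conn:
        result = conn.execution_options(
            stream_results=True, max_row_buffer=500,
        ).execute("SELECT * FROM big_table")
        for row in result:
            process(row)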
.. _psycopg2_executemany_mode:
Psycopg2 Fast Execution Helpers
-------------------------------
Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<http://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by multiple orders of magnitude.
SQLAlchemy allows this extension to be used for all ``executemany()`` style
calls invoked by an :class:`.Engine` when used with :ref:`multiple parameter
sets <execute_multiple>`, which includes the use of this feature both by the
Core as well as by the ORM for inserts of objects with non-autogenerated
primary key values, by adding the ``executemany_mode`` flag to
:func:`.create_engine`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='batch')
.. versionchanged:: 1.3.7 - the ``use_batch_mode`` flag has been superseded
by a new parameter ``executemany_mode`` which provides support both for
psycopg2's ``execute_batch`` helper as well as the ``execute_values``
helper.
Possible options for ``executemany_mode`` include:
* ``None`` - By default, psycopg2's extensions are not used, and the usual
``cursor.executemany()`` method is used when invoking batches of statements.
* ``'batch'`` - Uses ``psycopg2.extras.execute_batch`` so that multiple copies
of a SQL query, each one corresponding to a parameter set passed to
``executemany()``, are joined into a single SQL string separated by a
semicolon. This is the same behavior as was provided by the
``use_batch_mode=True`` flag.
* ``'values'`` - For Core :func:`.insert` constructs only (including those
emitted by the ORM automatically), the ``psycopg2.extras.execute_values``
extension is used so that multiple parameter sets are grouped into a single
INSERT statement and joined together with multiple VALUES expressions. This
method requires that the string text of the VALUES clause inside the
INSERT statement is manipulated, so is only supported with a compiled
:func:`.insert` construct where the format is predictable. For all other
constructs, including plain textual INSERT statements not rendered by the
SQLAlchemy expression language compiler, the
``psycopg2.extras.execute_batch`` method is used. It is therefore important
to note that **"values" mode implies that "batch" mode is also used for
all statements for which "values" mode does not apply**.
For both strategies, the ``executemany_batch_page_size`` and
``executemany_values_page_size`` arguments control how many parameter sets
should be represented in each execution. Because "values" mode implies a
fallback down to "batch" mode for non-INSERT statements, there are two
independent page size arguments. For each, the default value of ``None`` means
to use psycopg2's defaults, which at the time of this writing are quite low at
100. For the ``execute_values`` method, a number as high as 10000 may prove
to be performant, whereas for ``execute_batch``, as the number represents
full statements repeated, a number closer to the default of 100 is likely
more appropriate::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values',
executemany_values_page_size=10000, executemany_batch_page_size=500)
.. seealso::
:ref:`execute_multiple` - General information on using the
:class:`.Connection` object to execute statements in such a way as to make
use of the DBAPI ``.executemany()`` method.
.. versionchanged:: 1.3.7 - Added support for
``psycopg2.extras.execute_values``. The ``use_batch_mode`` flag is
superseded by the ``executemany_mode`` flag.
.. _psycopg2_unicode:
Unicode with Psycopg2
----------------------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf8``, as a more useful default::
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's
:meth:`psycopg2:connection.set_client_encoding` method
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
# set_client_encoding() setting;
# works for *all* PostgreSQL versions
engine = create_engine("postgresql://user:pass@host/dbname",
client_encoding='utf8')
This overrides the encoding specified in the PostgreSQL client configuration.
When using the parameter in this way, the psycopg2 driver emits
``SET client_encoding TO 'utf8'`` on the connection explicitly, and works
in all PostgreSQL versions.
Note that the ``client_encoding`` setting as passed to :func:`.create_engine`
is **not the same** as the more recently added ``client_encoding`` parameter
now supported by libpq directly. This is enabled when ``client_encoding``
is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed
using the :paramref:`.create_engine.connect_args` parameter::
engine = create_engine(
"postgresql://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'})
# using the query string is equivalent
engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8")
The above parameter was only added to libpq as of version 9.1 of PostgreSQL,
so using the previous method is better for cross-version support.
.. _psycopg2_disable_native_unicode:
Disabling Native Unicode
^^^^^^^^^^^^^^^^^^^^^^^^
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as most DBAPIs now support unicode fully.
Bound Parameter Styles
----------------------
The default parameter style for the psycopg2 dialect is "pyformat", where
SQL is rendered using ``%(paramname)s`` style. This format has the limitation
that it does not accommodate the unusual case of parameter names that
actually contain percent or parenthesis symbols; as SQLAlchemy in many cases
generates bound parameter names based on the name of a column, the presence
of these characters in a column name can lead to problems.
There are two solutions to the issue of a :class:`.schema.Column` that contains
one of these characters in its name. One is to specify the
:paramref:`.schema.Column.key` for columns that have such names::
measurement = Table('measurement', metadata,
Column('Size (meters)', Integer, key='size_meters')
)
Above, an INSERT statement such as ``measurement.insert()`` will use
``size_meters`` as the parameter name, and a SQL expression such as
``measurement.c.size_meters > 10`` will derive the bound parameter name
from the ``size_meters`` key as well.
.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key`
as the source of naming when anonymous bound parameters are created
in SQL expressions; previously, this behavior only applied to
:meth:`.Table.insert` and :meth:`.Table.update` parameter names.
The other solution is to use a positional format; psycopg2 allows use of the
"format" paramstyle, which can be passed to
:paramref:`.create_engine.paramstyle`::
engine = create_engine(
'postgresql://scott:tiger@localhost:5432/test', paramstyle='format')
With the above engine, instead of a statement like::
INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s)
{'Size (meters)': 1}
we instead see::
INSERT INTO measurement ("Size (meters)") VALUES (%s)
(1, )
Where above, the dictionary style is converted into a tuple with positional
style.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
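A minimal sketch using the generic :class:`.Connection` API (table and
column names below are illustrative)::
    with engine.connect() as conn:
        trans = conn.begin()
        savepoint = conn.begin_nested()   # emits SAVEPOINT
        try:
            conn.execute(my_table.insert(), {"data": 1})
            savepoint.commit()            # emits RELEASE SAVEPOINT
        except Exception:
            savepoint.rollback()          # emits ROLLBACK TO SAVEPOINT
        trans.commit()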
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all PostgreSQL dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
as well as the ``isolation_level`` argument used by
:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these
options make use of psycopg2's ``set_isolation_level()`` connection method,
rather than emitting a PostgreSQL directive; this is because psycopg2's
API-level setting is always emitted at the start of each transaction in any
case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
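For example (the DSN below is a placeholder), autocommit can be requested
engine-wide, or a stricter level applied per connection::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="AUTOCOMMIT")
    # or per-connection:
    conn = engine.connect().execution_options(
        isolation_level="SERIALIZABLE")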
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log PostgreSQL NOTICE messages
via the ``sqlalchemy.dialects.postgresql`` logger. When this logger
is set to the ``logging.INFO`` level, notice messages will be logged::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
Above, it is assumed that logging is configured externally. If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::
import logging
logging.basicConfig() # log messages to stdout
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. seealso::
`Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using
``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine
that the ``HSTORE`` extension is present.
This function is **skipped** if the version of psycopg2 installed is
less than version 2.4.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**. The dictionaries are converted by this extension into a
textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``
which may be more performant.
""" # noqa
from __future__ import absolute_import
import decimal
import logging
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import result as _result
from ...util import collections_abc
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
logger = logging.getLogger("sqlalchemy.dialects.postgresql")
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
if util.py2k and self._expect_unicode is True:
# for py2k, if the enum type needs unicode data (which is set up as
# part of the Enum() constructor based on values passed as py2k
# unicode objects) we have to use our own converters since
# psycopg2's don't work, a rare exception to the "modern DBAPIs
# support unicode everywhere" theme of deprecating
# convert_unicode=True. Use the special "force_nocheck" directive
# which forces unicode conversion to happen on the Python side
# without an isinstance() check. in py3k psycopg2 does the right
# thing automatically.
self._expect_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
if dialect._has_native_json:
return None
else:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def result_processor(self, dialect, coltype):
if dialect._has_native_jsonb:
return None
else:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_server_side_cursor(self):
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
def get_result_proxy(self):
self._log_notices(self.cursor)
if self._is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
# check also that notices is an iterable, after it's already
# established that we will be iterating through it. This is to get
# around test suites such as SQLAlchemy's using a Mock object for
# cursor
if not cursor.connection.notices or not isinstance(
cursor.connection.notices, collections_abc.Iterable
):
return
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
pass
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
pass
EXECUTEMANY_DEFAULT = util.symbol("executemany_default")
EXECUTEMANY_BATCH = util.symbol("executemany_batch")
EXECUTEMANY_VALUES = util.symbol("executemany_values")
class PGDialect_psycopg2(PGDialect):
driver = "psycopg2"
if util.py2k:
supports_unicode_statements = False
supports_server_side_cursors = True
default_paramstyle = "pyformat"
# set to true based on psycopg2 version
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
FEATURE_VERSION_MAP = dict(
native_json=(2, 5),
native_jsonb=(2, 5, 4),
sane_multi_rowcount=(2, 0, 9),
array_oid=(2, 4, 3),
hstore_adapter=(2, 4),
)
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
engine_config_types = PGDialect.engine_config_types.union(
[("use_native_unicode", util.asbool)]
)
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
},
)
@util.deprecated_params(
use_batch_mode=(
"1.3.7",
"The psycopg2 use_batch_mode flag is superseded by "
"executemany_mode='batch'",
)
)
def __init__(
self,
server_side_cursors=False,
use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
use_native_uuid=True,
executemany_mode=None,
executemany_batch_page_size=None,
executemany_values_page_size=None,
use_batch_mode=None,
**kwargs
):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.use_native_uuid = use_native_uuid
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
# Parse executemany_mode argument, allowing it to be only one of the
# symbol names
self.executemany_mode = util.symbol.parse_user_argument(
executemany_mode,
{
EXECUTEMANY_DEFAULT: [None],
EXECUTEMANY_BATCH: ["batch"],
EXECUTEMANY_VALUES: ["values"],
},
"executemany_mode",
)
if use_batch_mode:
self.executemany_mode = EXECUTEMANY_BATCH
self.executemany_batch_page_size = executemany_batch_page_size
self.executemany_values_page_size = executemany_values_page_size
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x) for x in m.group(1, 2, 3) if x is not None
)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = (
self.use_native_hstore
and self._hstore_oids(connection.connection) is not None
)
self._has_native_json = (
self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_json"]
)
self._has_native_jsonb = (
self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_jsonb"]
)
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
self.supports_sane_multi_rowcount = (
self.psycopg2_version
>= self.FEATURE_VERSION_MAP["sane_multi_rowcount"]
and self.executemany_mode is EXECUTEMANY_DEFAULT
)
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@classmethod
def _psycopg2_extensions(cls):
from psycopg2 import extensions
return extensions
@classmethod
def _psycopg2_extras(cls):
from psycopg2 import extras
return extras
@util.memoized_property
def _isolation_lookup(self):
extensions = self._psycopg2_extensions()
return {
"AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
"READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
"READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
"REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
"SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace("_", " ")]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
extras = self._psycopg2_extras()
extensions = self._psycopg2_extensions()
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_uuid:
def on_connect(conn):
extras.register_uuid(None, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
kw = {"oid": oid}
if util.py2k:
kw["unicode"] = True
if (
self.psycopg2_version
>= self.FEATURE_VERSION_MAP["array_oid"]
):
kw["array_oid"] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
if self._has_native_json:
extras.register_default_json(
conn, loads=self._json_deserializer
)
if self._has_native_jsonb:
extras.register_default_jsonb(
conn, loads=self._json_deserializer
)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
def do_executemany(self, cursor, statement, parameters, context=None):
if self.executemany_mode is EXECUTEMANY_DEFAULT:
cursor.executemany(statement, parameters)
return
if (
self.executemany_mode is EXECUTEMANY_VALUES
and context
and context.isinsert
and context.compiled.insert_single_values_expr
):
executemany_values = (
"(%s)" % context.compiled.insert_single_values_expr
)
# guard for statement that was altered via event hook or similar
if executemany_values not in statement:
executemany_values = None
else:
executemany_values = None
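        # In "values" mode with a matching single-VALUES clause, the VALUES
        # expression is collapsed into one "%s" placeholder and the statement
        # is handed to psycopg2's extras.execute_values(); otherwise we fall
        # back to extras.execute_batch() below.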
if executemany_values:
# Currently, SQLAlchemy does not pass "RETURNING" statements
# into executemany(), since no DBAPI has ever supported that
# until the introduction of psycopg2's executemany_values, so
# we are not yet using the fetch=True flag.
statement = statement.replace(executemany_values, "%s")
if self.executemany_values_page_size:
kwargs = {"page_size": self.executemany_values_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_values(
cursor,
statement,
parameters,
template=executemany_values,
**kwargs
)
else:
if self.executemany_batch_page_size:
kwargs = {"page_size": self.executemany_batch_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_batch(
cursor, statement, parameters, **kwargs
)
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= self.FEATURE_VERSION_MAP["hstore_adapter"]:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts:
if "port" in opts:
opts["port"] = int(opts["port"])
opts.update(url.query)
# send individual dbname, user, password, host, port
# parameters to psycopg2.connect()
return ([], opts)
elif url.query:
# any other connection arguments, pass directly
opts.update(url.query)
return ([], opts)
else:
# no connection arguments whatsoever; psycopg2.connect()
# requires that "dsn" be present as a blank string.
return ([""], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
# check the "closed" flag. this might not be
# present on old psycopg2 versions. Also,
# this flag doesn't actually help in a lot of disconnect
# situations, so don't rely on it.
if getattr(connection, "closed", False):
return True
# checks based on strings. in the case that .closed
# didn't cut it, fall back onto these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
                # psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Operation timed out",
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
|
the-stack_0_11592 | from __future__ import absolute_import
import os
import posixpath
import pysvn
from cobra.core.constants import README_MARKUPS
from cobra.core.markup import rest2html, can_markup, is_markdown, is_rst, is_plain
from cobra.core.markdown import markdown
def get_readme(repository, path='', revision=None):
# 1 - md
# 2 - rst
# 3 - txt
readme_suffix_names = README_MARKUPS
readme_name = ''
if repository.root.endswith(posixpath.sep):
root = repository.root[:-1]
else:
root = repository.root
root_path = '%s%s' % (root, path)
if revision is None:
revision = repository.get_latest_revision()
c = repository.get_svn_client()
r = pysvn.Revision(pysvn.opt_revision_kind.number, revision)
ls = c.list(root_path, recurse=False, peg_revision=r, revision=r)
ls = map(lambda y: dict(y.items()), map(lambda x: x[0], ls))
for item in ls:
if pysvn.node_kind.file == item['kind']:
# sometimes double slashes appear in the returned path
node_path = item['repos_path']
# we meet a special situation, it is that the root of repo is not real 'root'
# and we have no permission to access the real root, so, the repos_path that
# has the prefix of real root must be cut off.
if repository.prefix:
repo_prefix = repository.prefix
if repo_prefix.endswith(posixpath.sep):
repo_prefix = repo_prefix[:-1]
node_path = node_path.replace(repo_prefix, '/', 1)
if node_path.startswith('//'):
node_path = node_path[1:]
head, tail = os.path.split(node_path)
file_name, file_suffix = os.path.splitext(tail)
if file_name.lower() == 'readme' and (file_suffix in readme_suffix_names):
readme_name = node_path
break
            if file_name.lower() == 'readme' and (file_suffix.lower() == '.txt' or file_suffix == ''):
readme_name = node_path
if readme_name:
content = c.cat('%s%s' % (root, readme_name), revision=r, peg_revision=r)
try:
content = content.decode('utf-8')
except UnicodeDecodeError:
content = content.decode('gbk')
if readme_name.startswith('/'):
readme_name = readme_name[1:]
if is_markdown(readme_name):
content = markdown(content)
elif is_rst(readme_name):
content = rest2html(content)
else:
content = '<pre class="plain-readme">' + content + '</pre>'
readme = {
'name': readme_name,
'content': content
}
return readme
else:
return |
the-stack_0_11593 | # coding: utf-8
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
A script to calculate and plot optical spectra from ab initio calculations.
"""
import os
from glob import glob
import sys
import logging
import warnings
import argparse
from collections import OrderedDict
import matplotlib as mpl
mpl.use('Agg')
from pymatgen.io.vasp import Vasprun
from pymatgen.util.string import latexify
from sumo.io import questaal
from sumo.plotting.optics_plotter import SOpticsPlotter
from sumo.electronic_structure.optics import (broaden_eps,
calculate_dielectric_properties,
write_files)
__author__ = "Alex Ganose"
__version__ = "1.0"
__maintainer__ = "Alex Ganose"
__email__ = "[email protected]"
__date__ = "Jan 10, 2018"
def optplot(modes=('absorption',), filenames=None, codes='vasp',
prefix=None, directory=None,
gaussian=None, band_gaps=None, labels=None, average=True, height=6,
width=6, xmin=0, xmax=None, ymin=0, ymax=1e5, colours=None,
style=None, no_base_style=None,
image_format='pdf', dpi=400, plt=None, fonts=None):
"""A script to plot optical absorption spectra from VASP calculations.
Args:
modes (:obj:`list` or :obj:`tuple`):
Ordered list of :obj:`str` determining properties to plot.
Accepted options are 'absorption' (default), 'eps', 'eps-real',
'eps-im', 'n', 'n-real', 'n-im', 'loss' (equivalent to n-im).
filenames (:obj:`str` or :obj:`list`, optional): Path to data file.
For VASP this is a *vasprun.xml* file (can be gzipped); for
Questaal the *opt.ext* file from *lmf* or *eps_BSE.out* from
*bethesalpeter* may be used.
Alternatively, a list of paths can be
provided, in which case the absorption spectra for each will be
plotted concurrently.
codes (:obj:`str` or :obj:`list`, optional): Original
calculator. Accepted values are 'vasp' and 'questaal'. Items should
correspond to filenames.
prefix (:obj:`str`, optional): Prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
gaussian (:obj:`float`): Standard deviation for gaussian broadening.
band_gaps (:obj:`float`, :obj:`str` or :obj:`list`, optional): The band
gap as a :obj:`float`, plotted as a dashed line. If plotting
multiple spectra then a :obj:`list` of band gaps can be provided.
Band gaps can be provided as a floating-point number or as a path
to a *vasprun.xml* file. To skip over a line, set its bandgap to
zero or a negative number to place it outside the visible range.
labels (:obj:`str` or :obj:`list`): A label to identify the spectra.
If plotting multiple spectra then a :obj:`list` of labels can
be provided.
average (:obj:`bool`, optional): Average the dielectric response across
all lattice directions. Defaults to ``True``.
height (:obj:`float`, optional): The height of the plot.
width (:obj:`float`, optional): The width of the plot.
xmin (:obj:`float`, optional): The minimum energy on the x-axis.
xmax (:obj:`float`, optional): The maximum energy on the x-axis.
ymin (:obj:`float`, optional): The minimum absorption intensity on the
y-axis.
ymax (:obj:`float`, optional): The maximum absorption intensity on the
y-axis.
colours (:obj:`list`, optional): A :obj:`list` of colours to use in the
plot. The colours can be specified as a hex code, set of rgb
values, or any other format supported by matplotlib.
style (:obj:`list` or :obj:`str`, optional): (List of) matplotlib style
specifications, to be composed on top of Sumo base style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base style.
This can make alternative styles behave more predictably.
image_format (:obj:`str`, optional): The image file format. Can be any
format supported by matplotlib, including: png, jpg, pdf, and svg.
Defaults to pdf.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
a single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
Returns:
A matplotlib pyplot object.
"""
# Don't write files if this is being done to manipulate existing plt
save_files = False if plt else True
##### BUILD LIST OF FILES AUTOMATICALLY IF NECESSARY #####
if codes == 'vasp':
if not filenames:
if os.path.exists('vasprun.xml'):
filenames = ['vasprun.xml']
elif os.path.exists('vasprun.xml.gz'):
filenames = ['vasprun.xml.gz']
else:
logging.error('ERROR: No vasprun.xml found!')
sys.exit()
elif codes == 'questaal':
if not filenames:
if len(glob('opt.*')) > 0:
filenames = glob('opt.*')
if len(filenames) == 1:
logging.info("Found optics file: " + filenames[0])
else:
logging.info("Found optics files: " + ", ".join(filenames))
if isinstance(filenames, str):
filenames = [filenames]
if isinstance(codes, str):
codes = [codes] * len(filenames)
elif len(codes) == 1:
codes = list(codes) * len(filenames)
#### ITERATE OVER FILES READING DIELECTRIC DATA ####
dielectrics = []
auto_labels = []
auto_band_gaps = []
for i, (filename, code) in enumerate(zip(filenames, codes)):
if code == 'vasp':
vr = Vasprun(filename)
dielectrics.append(vr.dielectric)
auto_labels.append(
                latexify(vr.final_structure.composition.reduced_formula).
                replace('$_', r'$_\mathregular'))
if isinstance(band_gaps, list) and not band_gaps:
# band_gaps = [], auto band gap requested
auto_band_gaps.append(
vr.get_band_structure().get_band_gap()['energy'])
else:
auto_band_gaps.append(None)
elif code == 'questaal':
if not save_files:
out_filename = None
elif len(filenames) == 1:
out_filename = 'dielectric.dat'
else:
out_filename = 'dielectric_{0}.dat'.format(i + 1)
dielectrics.append(
questaal.dielectric_from_file(filename, out_filename))
auto_band_gaps.append(None)
auto_labels.append(filename.split('.')[-1])
if isinstance(band_gaps, list) and not band_gaps:
logging.info('Bandgap requested but not supported for Questaal'
' file {}: skipping...'.format(filename))
else:
raise Exception('Code selection "{}" not recognised'.format(code))
if not labels and len(filenames) > 1:
labels = auto_labels
#### PROCESS DIELECTRIC DATA: BROADENING AND DERIVED PROPERTIES ####
if gaussian:
dielectrics = [broaden_eps(d, gaussian)
for d in dielectrics]
# initialize spectrum data ready to append from each dataset
abs_data = OrderedDict()
for mode in modes:
abs_data.update({mode: []})
# for each calculation, get all required properties and append to data
for d in dielectrics:
for mode, spectrum in calculate_dielectric_properties(
d, set(modes), average=average).items():
abs_data[mode].append(spectrum)
if isinstance(band_gaps, list) and not band_gaps:
# empty list therefore use bandgaps collected from vasprun files
band_gaps = auto_band_gaps
elif isinstance(band_gaps, list):
# list containing filenames and/or values: mutate the list in-place
for i, item in enumerate(band_gaps):
if item is None:
pass
elif _floatable(item):
band_gaps[i] = float(item)
elif 'vasprun' in item:
band_gaps[i] = (
Vasprun(item).get_band_structure().get_band_gap()['energy']
)
else:
raise ValueError('Format not recognised for auto bandgap: '
'{}.'.format(item))
plotter = SOpticsPlotter(abs_data, band_gap=band_gaps, label=labels)
plt = plotter.get_plot(width=width, height=height, xmin=xmin,
xmax=xmax, ymin=ymin, ymax=ymax,
colours=colours, dpi=dpi, plt=plt, fonts=fonts,
style=style, no_base_style=no_base_style)
if save_files:
basename = 'absorption'
if prefix:
basename = '{}_{}'.format(prefix, basename)
image_filename = '{}.{}'.format(basename, image_format)
if directory:
image_filename = os.path.join(directory, image_filename)
plt.savefig(image_filename, format=image_format, dpi=dpi)
for mode, data in abs_data.items():
basename = 'absorption' if mode == 'abs' else mode
write_files(data, basename=basename,
prefix=prefix, directory=directory)
else:
return plt
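# Illustrative programmatic call (file names and values are placeholders):
#   optplot(modes=('absorption', 'loss'), filenames=['vasprun.xml'],
#           band_gaps=[], gaussian=0.05)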
def _floatable(item):
"""Check if an item can be intepreted with float()"""
try:
float(item)
return True
except ValueError:
return False
def _get_parser():
parser = argparse.ArgumentParser(description="""
optplot is a script to produce optical absorption spectra diagrams""",
epilog="""
Author: {}
Version: {}
Last updated: {}""".format(__author__, __version__, __date__))
parser.add_argument('mode', type=str, nargs='*', default='absorption',
metavar='M',
choices={'absorption', 'loss', 'eps_real', 'eps_imag',
'n_real', 'n_imag'},
help='Optical properties to plot. Multiple choices '
' will be displayed as subplots. Accepted values:'
' "absorption" (optical absorption over distance)'
', "loss" (energy-loss function -Im(1/eps)), '
'"eps_real" and "eps_imag" (real and imaginary '
'parts of the dielectric function), '
'"n_real" (real part of complex refractive index)'
'"n_imag" (imaginary part of RI, also known as '
'the extinction coefficient kappa.)')
parser.add_argument('-f', '--filenames', metavar='F',
help='path to one or more vasprun.xml files',
default=None, nargs='+')
parser.add_argument('-p', '--prefix', metavar='P',
help='prefix for the files generated')
parser.add_argument('-d', '--directory', metavar='D',
help='output directory for files')
parser.add_argument('-c', '--code', metavar='C', default='vasp', nargs='+',
help=('Original calculator. Accepted values are '
'"vasp" and "questaal".'))
parser.add_argument('-g', '--gaussian', type=float, metavar='G',
help='standard deviation of gaussian broadening')
parser.add_argument('-b', '--bandgaps', nargs='*', metavar='E',
help=('indicate the fundamental band gap (options: '
'nothing, vasprun.xml file, or float). A '
'sequence of files and values may be provided, '
'corresponding to the optical data files. '
'To skip a line, set a value outside the plot '
'range (e.g. -1).'))
parser.add_argument('-l', '--labels', nargs='+', metavar='L',
help='labels for the absorption specta')
parser.add_argument('-a', '--anisotropic', action='store_false',
help='separate spectra into to x, y, and z directions')
parser.add_argument('--height', type=float, default=None,
help='height of the graph')
parser.add_argument('--width', type=float, default=None,
help='width of the graph')
parser.add_argument('--xmin', type=float, default=0.,
help='minimum energy on the x-axis')
parser.add_argument('--xmax', type=float, default=None,
help='maximum energy on the x-axis')
parser.add_argument('--ymin', type=str, default=['auto'], nargs='+',
help='minimum intensity on the y-axis; may specify '
'multiple values if plotting more than one axis. '
'Use "auto" or "_" for automatic value.')
parser.add_argument('--ymax', type=str, default=['auto'], nargs='+',
help='maximum intensity on the y-axis; may specify'
'multiple values if plotting more than one axis. '
'Use "auto" or "_" for automatic value.')
parser.add_argument('--style', type=str, nargs='+', default=None,
help='matplotlib style specifications')
parser.add_argument('--no-base-style', action='store_true',
dest='no_base_style',
help='prevent use of sumo base style')
parser.add_argument('--format', type=str, default='pdf',
dest='image_format', metavar='FORMAT',
help='image file format (options: pdf, svg, jpg, png)')
parser.add_argument('--dpi', type=int, default=400,
help='pixel density for image file')
parser.add_argument('--font', default=None, help='font to use')
return parser
def main():
args = _get_parser().parse_args()
logging.basicConfig(filename='sumo-optplot.log', level=logging.INFO,
filemode='w', format='%(message)s')
console = logging.StreamHandler()
logging.info(" ".join(sys.argv[:]))
logging.getLogger('').addHandler(console)
warnings.filterwarnings("ignore", category=UserWarning,
module="matplotlib")
warnings.filterwarnings("ignore", category=UnicodeWarning,
module="matplotlib")
warnings.filterwarnings("ignore", category=UserWarning,
module="pymatgen")
# Wrap mode into list if necessary
if not isinstance(args.mode, list):
args.mode = [args.mode]
# Replace text placeholders with preferred Python representation: None
ymin = [None if (x.lower() in ('auto', '_')) else float(x)
for x in args.ymin]
ymax = [None if (x.lower() in ('auto', '_')) else float(x)
for x in args.ymax]
# Settings should be list corresponding to n_plots, or value for all
ymin = ymin[0] if len(ymin) == 1 else ymin
ymax = ymax[0] if len(ymax) == 1 else ymax
optplot(modes=args.mode, filenames=args.filenames, codes=args.code,
prefix=args.prefix, directory=args.directory,
gaussian=args.gaussian, band_gaps=args.bandgaps,
labels=args.labels, average=args.anisotropic, height=args.height,
width=args.width, xmin=args.xmin, xmax=args.xmax, ymin=ymin,
ymax=ymax, colours=None, image_format=args.image_format,
dpi=args.dpi, style=args.style, no_base_style=args.no_base_style,
fonts=args.font)
if __name__ == "__main__":
main()
|
the-stack_0_11594 | # Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.n1kv import n1kv_client
LOG = logging.getLogger(__name__)
_resource_metadata = {'port': ['id', 'macAddress', 'ipAddress', 'subnetId'],
'vmnetwork': ['name', 'networkSegmentId',
'networkSegment', 'portProfile',
'portProfileId', 'tenantId',
'portId', 'macAddress',
'ipAddress', 'subnetId'],
'subnet': ['addressRangeStart', 'addressRangeEnd',
'ipAddressSubnet', 'description', 'gateway',
'dhcp', 'dnsServersList', 'networkAddress',
'netSegmentName', 'id', 'tenantId']}
class TestClient(n1kv_client.Client):
def __init__(self, **kwargs):
self.broken = False
self.inject_params = False
self.total_profiles = 2
super(TestClient, self).__init__()
def _get_total_profiles(self):
return self.total_profiles
def _do_request(self, method, action, body=None, headers=None):
if self.broken:
raise c_exc.VSMError(reason='VSM:Internal Server Error')
if self.inject_params and body:
body['invalidKey'] = 'catchMeIfYouCan'
if method == 'POST':
return _validate_resource(action, body)
elif method == 'GET':
if 'virtual-port-profile' in action:
return _policy_profile_generator(
self._get_total_profiles())
else:
raise c_exc.VSMError(reason='VSM:Internal Server Error')
class TestClientInvalidRequest(TestClient):
def __init__(self, **kwargs):
super(TestClientInvalidRequest, self).__init__()
self.inject_params = True
class TestClientInvalidResponse(TestClient):
def __init__(self, **kwargs):
super(TestClientInvalidResponse, self).__init__()
self.broken = True
def _validate_resource(action, body=None):
if body:
body_set = set(body.keys())
else:
return
if 'vm-network' in action and 'port' not in action:
vmnetwork_set = set(_resource_metadata['vmnetwork'])
if body_set - vmnetwork_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'port' in action:
port_set = set(_resource_metadata['port'])
if body_set - port_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'subnet' in action:
subnet_set = set(_resource_metadata['subnet'])
if body_set - subnet_set:
raise c_exc.VSMError(reason='Invalid Request')
else:
return
def _policy_profile_generator(total_profiles):
"""
Generate policy profile response and return a dictionary.
:param total_profiles: integer representing total number of profiles to
return
"""
profiles = {}
for num in range(1, total_profiles + 1):
name = "pp-%s" % num
profile_id = "00000000-0000-0000-0000-00000000000%s" % num
profiles[name] = {"properties": {"name": name, "id": profile_id}}
return profiles
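# For example, _policy_profile_generator(1) returns:
#   {"pp-1": {"properties": {"name": "pp-1",
#                            "id": "00000000-0000-0000-0000-000000000001"}}}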
def _policy_profile_generator_xml(total_profiles):
"""
Generate policy profile response in XML format.
:param total_profiles: integer representing total number of profiles to
return
"""
xml = ["""<?xml version="1.0" encoding="utf-8"?>
<set name="virtual_port_profile_set">"""]
template = (
'<instance name="%(num)d"'
' url="/api/n1k/virtual-port-profile/%(num)s">'
'<properties>'
'<id>00000000-0000-0000-0000-00000000000%(num)s</id>'
'<name>pp-%(num)s</name>'
'</properties>'
'</instance>'
)
xml.extend(template % {'num': n} for n in range(1, total_profiles + 1))
xml.append("</set>")
return ''.join(xml)
|
the-stack_0_11595 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import json
import os
def main(blastdir):
print('Populating organism selftargeting spacer objects')
for fn in glob.glob(blastdir + '/*.json'):
# get loci intervals of organism
accession = os.path.splitext(os.path.split(fn)[1])[0]
q_org = Organism.objects.filter(accession=accession)
if not q_org.exists():
print('Organism with accession {} is not in db but blast '
'report exists'.format(accession))
continue
org = q_org[0]
interval_loci = [
(entry['genomic_start'], entry['genomic_end'])
for entry in org.locus_set.all().values('genomic_start',
'genomic_end')
]
with open(fn, 'r') as f:
try:
blastrec = json.loads(f.read())
except Exception as e:
print('Error on accession {}\n{}'.format(accession, e))
continue
for res in blastrec['BlastOutput2']:
query = res['report']['results']['bl2seq'][0]
spacerid = query['query_title']
for hit in query['hits']:
for hsps in hit['hsps']:
start_h, end_h = hsps['hit_from'], hsps['hit_to']
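                    # Hits that fall inside one of the organism's annotated
                    # loci (presumably the spacer's own CRISPR array) are
                    # skipped; only hits outside the loci are recorded as
                    # self-targeting.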
in_locus = any([start_h > start and end_h < end
for start, end in interval_loci])
if in_locus:
continue
q_spacer = Spacer.objects.filter(id=int(spacerid))
if not q_spacer.exists():
print('Spacer with sequence {} for organism {} '
'not found in db'.format(hsps['qseq'],
org.accession))
continue
spacer = q_spacer[0]
evalue = float(hsps['evalue'])
oselftarget, _ = OrganismSelfSpacer.objects.get_or_create(
organism=org,
spacer=spacer,
evalue=evalue,
genomic_start=start_h,
genomic_end=end_h
)
if __name__ == '__main__':
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "phageAPI.settings")
django.setup()
from restapi.models import Organism, OrganismSelfSpacer, Spacer
main('gbfiles/blastoutput')
|
the-stack_0_11597 | """This module implements row model of MUFG bank CSV."""
from __future__ import annotations
from abc import ABC
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Optional
from zaimcsvconverter.file_csv_convert import FileCsvConvert
from zaimcsvconverter.inputcsvformats import InputRow, InputRowFactory, InputStoreRow, InputStoreRowData
from zaimcsvconverter.utility import Utility
@dataclass
class MufgRowData(InputStoreRowData):
"""This class implements data class for wrapping list of MUFG bunk CSV row model."""
    # Reason: This implementation depends on the design of the CSV. pylint: disable=too-many-instance-attributes
class Summary(Enum):
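        # Raw summary strings as they appear in the CSV: "カ−ド" means card,
        # and "カ−ドC1" appears to denote a card used at a convenience store
        # ATM (inferred from the member name).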
CARD = "カ−ド"
CARD_CONVENIENCE_STORE_ATM = "カ−ドC1"
class CashFlowKind(Enum):
"""This class implements constant of cash flow kind in MUFG CSV."""
INCOME = "入金"
PAYMENT = "支払い"
TRANSFER_INCOME = "振替入金"
TRANSFER_PAYMENT = "振替支払い"
_date: str
summary: str
_summary_content: str
_payed_amount: str
_deposit_amount: str
balance: str
note: str
is_uncapitalized: str
_cash_flow_kind: str
@property
def date(self) -> datetime:
return datetime.strptime(self._date, "%Y/%m/%d")
@property
def store_name(self) -> str:
return self._summary_content
@property
def payed_amount(self) -> Optional[int]:
return Utility.convert_string_to_int_or_none(self._payed_amount)
@property
def deposit_amount(self) -> Optional[int]:
return Utility.convert_string_to_int_or_none(self._deposit_amount)
@property
def cash_flow_kind(self) -> MufgRowData.CashFlowKind:
return self.CashFlowKind(self._cash_flow_kind)
@property
def validate(self) -> bool:
self.stock_error(lambda: self.date, f"Invalid date. Date = {self._date}")
# This comment prevents pylint duplicate-code.
self.stock_error(lambda: self.payed_amount, f"Invalid payed amount. Payed amount = {self._payed_amount}")
self.stock_error(
lambda: self.deposit_amount, f"Invalid deposit amount. Deposit amount = {self._deposit_amount}"
)
self.stock_error(
lambda: self.cash_flow_kind,
'The value of "Cash flow kind" has not been defined in this code. '
f"Cash flow kind = {self._cash_flow_kind}",
)
return super().validate
class MufgRow(InputRow):
"""This class implements row model of MUFG bank CSV."""
def __init__(self, input_row_data: MufgRowData, *args, **kwargs):
super().__init__(input_row_data, *args, **kwargs)
self.cash_flow_kind: MufgRowData.CashFlowKind = input_row_data.cash_flow_kind
self._summary: str = input_row_data.summary
@property
def is_income(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.INCOME
@property
def is_payment(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.PAYMENT
@property
def is_transfer_income(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.TRANSFER_INCOME
@property
def is_transfer_payment(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.TRANSFER_PAYMENT
@property
def is_by_card(self) -> bool:
return (
self._summary == MufgRowData.Summary.CARD.value
or self._summary == MufgRowData.Summary.CARD_CONVENIENCE_STORE_ATM.value
)
@property
def is_income_from_other_own_account(self) -> bool:
return self.is_income and self.is_by_card
class MufgIncomeRow(MufgRow, ABC):
"""This class implements income row model of MUFG bank CSV."""
def __init__(self, row_data: MufgRowData, *args, **kwargs):
super().__init__(row_data, *args, **kwargs)
self._deposit_amount: Optional[int] = row_data.deposit_amount
@property
def deposit_amount(self) -> int:
if self._deposit_amount is None:
raise ValueError("Deposit amount on income row is not allowed empty.")
return self._deposit_amount
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.deposit_amount,
f"Deposit amount in income row is required. Deposit amount = {self._deposit_amount}",
)
return super().validate
class MufgPaymentRow(MufgRow, ABC):
"""This class implements payment row model of MUFG bank CSV."""
def __init__(self, row_data: MufgRowData, *args, **kwargs):
super().__init__(row_data, *args, **kwargs)
self._payed_amount: Optional[int] = row_data.payed_amount
@property
def payed_amount(self) -> int:
if self._payed_amount is None:
raise ValueError("Payed amount on payment row is not allowed empty.")
return self._payed_amount
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.payed_amount, f"Payed amount in payment row is required. Payed amount = {self._payed_amount}"
)
return super().validate
class MufgIncomeFromSelfRow(MufgIncomeRow):
"""This class implements income from self row model of MUFG bank CSV."""
class MufgPaymentToSelfRow(MufgPaymentRow):
"""This class implements payment from self row model of MUFG bank CSV."""
# pylint: disable=too-many-instance-attributes
class MufgStoreRow(MufgRow, InputStoreRow, ABC):
"""This class implements row model of MUFG bank CSV."""
def __init__(self, input_row_data: MufgRowData):
super().__init__(input_row_data, FileCsvConvert.MUFG.value)
@property
def is_transfer_income_from_other_own_account(self) -> bool:
"""This method returns whether this row is transfer income from other own account or not."""
return self.is_transfer_income and self.store.transfer_target is not None
@property
def is_transfer_payment_to_other_own_account(self) -> bool:
"""This method returns whether this row is transfer payment to other own account or not."""
return self.is_transfer_payment and self.store.transfer_target is not None
# pylint: disable=too-many-ancestors
class MufgIncomeFromOthersRow(MufgStoreRow, MufgIncomeRow):
"""This class implements row model of MUFG bank CSV."""
# pylint: disable=too-many-ancestors
class MufgPaymentToSomeoneRow(MufgStoreRow, MufgPaymentRow):
"""
This class implements payment row model of MUFG bank CSV.
    The payment may be to someone else or to the user's own account.
"""
class MufgRowFactory(InputRowFactory[MufgRowData, MufgRow]):
"""This class implements factory to create MUFG CSV row instance."""
def create(self, input_row_data: MufgRowData) -> MufgRow:
if input_row_data.is_empty_store_name and input_row_data.cash_flow_kind == MufgRowData.CashFlowKind.INCOME:
return MufgIncomeFromSelfRow(input_row_data)
if input_row_data.is_empty_store_name and input_row_data.cash_flow_kind == MufgRowData.CashFlowKind.PAYMENT:
return MufgPaymentToSelfRow(input_row_data)
if input_row_data.cash_flow_kind in (
MufgRowData.CashFlowKind.PAYMENT,
MufgRowData.CashFlowKind.TRANSFER_PAYMENT,
):
return MufgPaymentToSomeoneRow(input_row_data)
if input_row_data.cash_flow_kind in (MufgRowData.CashFlowKind.INCOME, MufgRowData.CashFlowKind.TRANSFER_INCOME):
return MufgIncomeFromOthersRow(input_row_data)
raise ValueError(
f"Cash flow kind is not supported. Cash flow kind = {input_row_data.cash_flow_kind}"
) # pragma: no cover
        # Reason: This line is insurance for future development; the process should never be able to reach it.
|
the-stack_0_11598 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base evaluator."""
import abc
import dataclasses
from typing import List, Optional, Union
import numpy as np
@dataclasses.dataclass
class EvaluatorOutput:
"""The output of an evaluator."""
# An evaluator does not necessarily generate all fields below. For example,
  # some evaluators like Kendall's Tau return a scalar and image metric, while
# TwoWayCycleConsistency only generates a scalar metric.
scalar: Optional[Union[float, List[float]]] = None
image: Optional[Union[np.ndarray, List[np.ndarray]]] = None
video: Optional[Union[np.ndarray, List[np.ndarray]]] = None
@staticmethod
def _assert_same_attrs(list_out):
"""Ensures a list of this class instance have the same attributes."""
def _not_none(o):
return [getattr(o, a) is not None for a in ["scalar", "image", "video"]]
expected = _not_none(list_out[0])
for o in list_out[1:]:
actual = _not_none(o)
assert np.array_equal(expected, actual)
@staticmethod
def merge(list_out):
"""Merge a list of this class instance into one."""
# We need to make sure that all elements of the list have the same
# non-empty (i.e. != None) attributes.
EvaluatorOutput._assert_same_attrs(list_out)
# At this point, we're confident that we only need to check the
# attributes of the first member of the list to guarantee the same
# availability for *all* other members of the list.
scalars = None
if list_out[0].scalar is not None:
scalars = [o.scalar for o in list_out]
images = None
if list_out[0].image is not None:
images = [o.image for o in list_out]
videos = None
if list_out[0].video is not None:
videos = [o.video for o in list_out]
return EvaluatorOutput(scalars, images, videos)
def log(self, logger, global_step, name, prefix):
"""Log the attributes to tensorboard."""
if self.scalar is not None:
if isinstance(self.scalar, list):
self.scalar = np.mean(self.scalar)
logger.log_scalar(self.scalar, global_step, name, prefix)
if self.image is not None:
if isinstance(self.image, list):
for i, image in enumerate(self.image):
logger.log_image(image, global_step, name + f"_{i}", prefix)
else:
logger.log_image(self.image, global_step, name, prefix)
if self.video is not None:
if isinstance(self.video, list):
for i, video in enumerate(self.video):
logger.log_video(video, global_step, name + f"_{i}", prefix)
else:
logger.log_video(self.video, global_step, name, prefix)
logger.flush()
class Evaluator(abc.ABC):
"""Base class for evaluating a self-supervised model on downstream tasks.
Subclasses must implement the `_evaluate` method.
"""
def __init__(self, inter_class):
self.inter_class = inter_class
@abc.abstractmethod
def evaluate(self, outs):
"""Evaluate the downstream task in embedding space.
Args:
outs: A list of outputs generated by the model on the downstream dataset.
:meta public:
"""
pass
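# Illustrative sketch of a hypothetical concrete evaluator (the class name and
# the structure of the `outs` entries are assumptions, not part of this module):
#   class MeanScalarEvaluator(Evaluator):
#     def evaluate(self, outs):
#       values = [float(np.mean(o)) for o in outs]
#       return EvaluatorOutput(scalar=values)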
|
the-stack_0_11599 | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the varible if of the same major QT version. Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
elif rcParams['backend'] == 'Qt4Agg':
QT_RC_MAJOR_VERSION = 4
else:
# A different backend was specified, but we still got here because a Qt
# related file was imported. This is allowed, so lets try and guess
# what we should be using.
if "PyQt4" in sys.modules or "PySide" in sys.modules:
# PyQt4 or PySide is actually used.
QT_RC_MAJOR_VERSION = 4
else:
# This is a fallback: PyQt5
QT_RC_MAJOR_VERSION = 5
QT_API = None
# check if any binding is already imported, if so silently ignore the
# rcparams/ENV settings and use what ever is already imported.
if 'PySide' in sys.modules:
# user has imported PySide before importing mpl
QT_API = QT_API_PYSIDE
if 'PyQt4' in sys.modules:
# user has imported PyQt4 before importing mpl
# this case also handles the PyQt4v2 case as once sip is imported
# the API versions can not be changed so do not try
QT_API = QT_API_PYQT
if 'PyQt5' in sys.modules:
# the user has imported PyQt5 before importing mpl
QT_API = QT_API_PYQT5
if (QT_API_ENV is not None) and QT_API is None:
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
elif rcParams['backend'] == 'Qt4Agg':
QT_API = rcParams['backend.qt4']
else:
# A non-Qt backend was specified, no version of the Qt
# bindings is imported, but we still got here because a Qt
# related file was imported. This is allowed, fall back to Qt5
# using which ever binding the rparams ask for.
QT_API = rcParams['backend.qt5']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API == QT_API_PYQT5:
try:
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
except ImportError:
# fell through, tried PyQt5, failed fall back to PyQt4
QT_API = rcParams['backend.qt4']
QT_RC_MAJOR_VERSION = 4
# needs to be if so we can re-test the value of QT_API which may
# have been changed in the above if block
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
try:
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
except NameError:
# QtCore did not get imported, fall back to pyside
QT_API = QT_API_PYSIDE
if QT_API == QT_API_PYSIDE: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"or PySide package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
Here I've opted to simple copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
def is_pyqt5():
return QT_API == QT_API_PYQT5
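# Illustrative downstream usage (a sketch; the import path assumes this module
# lives at matplotlib.backends.qt_compat):
#   from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
#   if is_pyqt5():
#       pass  # Qt5-specific code path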
|
the-stack_0_11600 | from pygame import *
from random import *
from time import time as timer
win_widh = 1000
win_hight = 400
win = display.set_mode((win_widh, win_hight))
display.set_caption('Plants')
ImegHero = 'Woodman.png'
ImeBack = 'Forest.png'
ImeAnemi = 'BigCliz.png'
img_bullet = 'Ball.png'
TimeNow = timer()
TimeHit = timer()
# clock = time.Clock()
class GameSprite(sprite.Sprite):
def __init__(self, PLimage, playX, playY, sizeX, sizeY, speed):
sprite.Sprite.__init__(self)
self.image = transform.scale(image.load(PLimage), (sizeX, sizeY))
self.speed = speed
self.rect = self.image.get_rect()
self.rect.x = playX
self.rect.y = playY
def reset(self):
win.blit(self.image, (self.rect.x, self.rect.y))
class Plaer(GameSprite):
def Went(self):
global LastWent
global TimeNow
went = key.get_pressed()
if went[K_UP]:
self.rect.y -= self.speed
if went[K_DOWN]:
self.rect.y += self.speed
if went[K_LEFT]:
self.rect.x -= self.speed
LastWent = 'Left'
if went[K_RIGHT]:
self.rect.x += self.speed
LastWent = 'Right'
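        # Dash: once every 2 seconds, SPACE teleports the hero 10 steps in the
        # direction of the last horizontal move (tracked in LastWent).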
if (timer() - TimeNow) > 2:
if went[K_SPACE]:
TimeNow = timer()
if LastWent == 'Left':
self.rect.x -= 10*self.speed
else:
self.rect.x += 10*self.speed
def fire(self):
bullet = Bullet(img_bullet, self.rect.centerx, self.rect.centery, 15, 20, 15)
bullets.add(bullet)
class PlantsAnami(GameSprite):
def update(self):
self.rect.x += self.speed
if self.rect.x < 0:
self.rect.x = win_widh
self.rect.y = randint(150, 340)
class Bullet(GameSprite):
def update(self):
self.rect.x += self.speed
if self.rect.x > win_widh:
self.kill()
fps = 30
LastWent = 'Left'
HeroGad = Plaer(ImegHero, 50, 50, 60, 60, 10)
GameRun = True
bacgraund = transform.scale(image.load(ImeBack), (win_widh, win_hight))
monsters = sprite.Group()
ds = [50, win_widh-100]
for i in range(1,7):
monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 400-90), 72, 72, randint(-5,-1))
monsters.add(monster)
bullets = sprite.Group()
def DrawAll():
win.blit(bacgraund, (0,0))
# draw.rect(win, (0,255,0), (0, 350, 1000, 50))
HeroGad.reset()
HeroGad.Went()
monsters.update()
monsters.draw(win)
bullets.update()
bullets.draw(win)
display.update()
while GameRun:
for e in event.get():
if e.type == QUIT:
GameRun = False
elif e.type == KEYDOWN:
if e.key == K_q:
HeroGad.fire()
collide = sprite.groupcollide(bullets, monsters, True, True)
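    # groupcollide removes both the bullet and the monster it hit; refill the
    # group so that six monsters stay in play.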
for c in collide:
while len(monsters) < 6:
monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 340), 72, 72, randint(-5,-1))
monsters.add(monster)
# tap = key.get_pressed()
# if sprite.spritecollide(HeroGad, monsters, True):
# print('AAAAAAAAAAAAAA')
# # monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 340), 72, 72, randint(-5,-1))
# # monsters.add(monster)
# while len(monsters)<6:
# monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 340), 72, 72, randint(-5,-1))
# monsters.add(monster)
DrawAll()
time.delay(fps) |
the-stack_0_11601 | # pylint: disable=g-direct-third-party-import
# pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the embedded_tools.zip that is part of the Bazel binary."""
import contextlib
import fnmatch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
output_paths = [
('*tools/jdk/BUILD*', lambda x: 'tools/jdk/BUILD'),
('*tools/platforms/platforms.BUILD', lambda x: 'platforms/BUILD'),
('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
('*tools/cpp/runfiles/generated_*',
lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
('*jarjar_command_deploy.jar',
lambda x: 'tools/jdk/jarjar_command_deploy.jar'),
('*BUILD-new.pkg', lambda x: 'tools/jdk/BUILD.pkg'),
('*BUILD.javalangtools', lambda x: 'third_party/java/jdk/langtools/BUILD'),
('*singlejar_local.exe', lambda x: 'tools/jdk/singlejar/singlejar.exe'),
('*singlejar_local', lambda x: 'tools/jdk/singlejar/singlejar'),
('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
('*ijar.exe', lambda x: 'tools/jdk/ijar/ijar.exe'),
('*ijar', lambda x: 'tools/jdk/ijar/ijar'),
('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
('*zipper', lambda x: 'tools/zip/zipper/zipper'),
('*src/objc_tools/*',
lambda x: 'tools/objc/precomp_' + os.path.basename(x)),
('*xcode*StdRedirect.dylib', lambda x: 'tools/objc/StdRedirect.dylib'),
('*xcode*make_hashed_objlist.py',
lambda x: 'tools/objc/make_hashed_objlist.py'),
('*xcode*realpath', lambda x: 'tools/objc/realpath'),
('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
('*src/tools/xcode/*.sh', lambda x: 'tools/objc/' + os.path.basename(x)),
('*src/tools/xcode/*',
lambda x: 'tools/objc/' + os.path.basename(x) + '.sh'),
('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
for pattern, transformer in output_paths:
if fnmatch.fnmatch(path.replace('\\', '/'), pattern):
# BUILD.tools are stored as BUILD files.
return transformer(path).replace('/BUILD.tools', '/BUILD')
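# Illustrative mapping (hypothetical input path): a build artifact such as
# 'bazel-out/k8-opt/bin/tools/zip/zipper/zipper' matches the '*zipper' pattern
# above and is therefore stored in the archive as 'tools/zip/zipper/zipper'.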
def get_input_files(argsfile):
"""Returns a sorted list of tuples (archive_file, input_file).
This describes the files that should be put into the generated archive.
Args:
argsfile: The file containing the list of input files.
"""
with open(argsfile, 'r') as f:
input_files = set(x.strip() for x in f.readlines())
result = {}
for input_file in input_files:
# If we have both a BUILD and a BUILD.tools file, take the latter only.
if (os.path.basename(input_file) == 'BUILD' and
input_file + '.tools' in input_files):
continue
# This gives us the same behavior as the older bash version of this
# tool: If two input files map to the same output files, the one that
# comes last in the list of input files overrides all earlier ones.
result[get_output_path(input_file)] = input_file
# By sorting the file list, the resulting ZIP file is reproducible and
# deterministic.
return sorted(result.items())
def copy_jdk_into_archive(output_zip, archive_file, input_file):
"""Extract the JDK and adds it to the archive under jdk/*."""
def _replace_dirname(filename):
# Rename the first folder to 'jdk', because Bazel looks for a
# bundled JDK in the embedded tools using that folder name.
return 'jdk/' + '/'.join(filename.split('/')[1:])
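# Illustrative rename (hypothetical archive member): an entry such as
# 'zulu8.2.0.1-jdk8.0.121-linux_x64/bin/java' inside the bundled JDK archive
# comes out as 'jdk/bin/java' in the embedded tools zip.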
# The JDK is special - it's extracted instead of copied.
if archive_file.endswith('.tar.gz'):
copy_tar_to_zip(output_zip, input_file, _replace_dirname)
elif archive_file.endswith('.zip'):
copy_zip_to_zip(output_zip, input_file, _replace_dirname)
def main():
output_zip = os.path.join(os.getcwd(), sys.argv[1])
input_files = get_input_files(sys.argv[2])
# Copy all the input_files into output_zip.
# contextlib.closing is used to stay compatible with Python 2.6 (CentOS 6.7).
with contextlib.closing(
zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
zipinfo = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o644 << 16
output_zip.writestr(zipinfo, 'workspace(name = "bazel_tools")\n')
for archive_file, input_file in input_files:
if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
copy_jdk_into_archive(output_zip, archive_file, input_file)
else:
zipinfo = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o755 << 16 if is_executable(
input_file) else 0o644 << 16
zipinfo.compress_type = zipfile.ZIP_DEFLATED
with open(input_file, 'rb') as f:
output_zip.writestr(zipinfo, f.read())
if __name__ == '__main__':
main()
|
the-stack_0_11603 | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for checking that required Python packages are installed."""
from collections import deque
import os
import pkg_resources
from perfkitbenchmarker import errors
# Path of the root of the current git branch.
_BRANCH_ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
def _CheckRequirements(requirements_file_path):
"""Checks that all package requirements specified in a file are met.
Args:
requirements_file_path: string. Path to a pip requirements file.
"""
with open(requirements_file_path, 'rb') as fp:
requirements_to_check = [(requirements_file_path, deque(fp.readlines()))]
try:
while requirements_to_check:
file_path, lines = requirements_to_check.pop()
while lines:
line = lines.popleft().strip()
if line.startswith('-r'):
requirements_to_check.append((file_path, lines))
file_path = os.path.join(os.path.dirname(file_path), line[2:])
with open(file_path, 'rb') as fp:
lines = deque(fp.readlines())
elif line:
pkg_resources.require(line)
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict) as e:
# In newer versions of setuptools, these exception classes have a report
# method that provides a readable description of the error.
report = getattr(e, 'report', None)
err_msg = report() if report else str(e)
raise errors.Setup.PythonPackageRequirementUnfulfilled(
'A Python package requirement was not met while checking "{path}": '
'{msg}{linesep}To install required packages, execute the following '
'command:{linesep}pip install -r "{path}"{linesep}To bypass package '
'requirement checks, run PerfKit Benchmarker with the '
'--ignore_package_requirements flag.'.format(
linesep=os.linesep, msg=err_msg, path=requirements_file_path))
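# Illustrative layout handled by the loop above (hypothetical file names):
#   requirements.txt          ->  '-r requirements-common.txt' plus pinned packages
#   requirements-common.txt   ->  plain 'package==version' lines
# A '-r' line pushes the parent file back onto the stack and switches to the
# included file, so nested includes are checked before the parent finishes.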
def CheckBasicRequirements():
"""Checks that all basic package requirements are met.
The basic requirements include packages used by modules that are imported
regardless of the specified cloud providers. The list of required packages
and versions is found in the requirements.txt file in the git branch's root
directory. If such a file does not exist, then the requirements check is
skipped.
"""
requirements_file_path = os.path.join(_BRANCH_ROOT_DIR, 'requirements.txt')
if os.path.isfile(requirements_file_path):
_CheckRequirements(requirements_file_path)
def CheckProviderRequirements(provider):
"""Checks that all provider-specific requirements are met.
The provider-specific requirements include packages used by modules that are
imported when using a particular cloud provider. The list of required packages
is found in the perfkitbenchmarker/providers/<provider>/requirements.txt file
under the git branch root. If such a file does not exist, then no additional requirements are
necessary.
Args:
provider: string. Lowercase name of the cloud provider (e.g. 'gcp').
"""
requirements_file_path = os.path.join(
_BRANCH_ROOT_DIR, 'perfkitbenchmarker', 'providers', provider,
'requirements.txt')
if os.path.isfile(requirements_file_path):
_CheckRequirements(requirements_file_path)
|
the-stack_0_11604 | """Load a layout in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, Optional
import bpy
import json
from avalon import api
from avalon.blender.pipeline import AVALON_CONTAINERS
from avalon.blender.pipeline import AVALON_CONTAINER_ID
from avalon.blender.pipeline import AVALON_PROPERTY
from avalon.blender.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin
class JsonLayoutLoader(plugin.AssetLoader):
"""Load layout published from Unreal."""
families = ["layout"]
representations = ["json"]
label = "Load Layout"
icon = "code-fork"
color = "orange"
animation_creator_name = "CreateAnimation"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
api.remove(obj.get(AVALON_PROPERTY))
def _remove_animation_instances(self, asset_group):
instances = bpy.data.collections.get(AVALON_INSTANCES)
if instances:
for obj in list(asset_group.children):
anim_collection = instances.children.get(
obj.name + "_animation")
if anim_collection:
bpy.data.collections.remove(anim_collection)
def _get_loader(self, loaders, family):
name = ""
if family == 'rig':
name = "BlendRigLoader"
elif family == 'model':
name = "BlendModelLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
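# Illustrative resolution: an element whose family is 'rig' resolves to the
# loader class named 'BlendRigLoader', 'model' to 'BlendModelLoader'; any
# other family yields None and the element is skipped by the caller.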
def _process(self, libpath, asset, asset_group, actions):
bpy.ops.object.select_all(action='DESELECT')
with open(libpath, "r") as fp:
data = json.load(fp)
all_loaders = api.discover(api.Loader)
for element in data:
reference = element.get('reference')
family = element.get('family')
loaders = api.loaders_from_representation(all_loaders, reference)
loader = self._get_loader(loaders, family)
if not loader:
continue
instance_name = element.get('instance_name')
action = None
if actions:
action = actions.get(instance_name, None)
options = {
'parent': asset_group,
'transform': element.get('transform'),
'action': action,
'create_animation': True if family == 'rig' else False,
'animation_asset': asset
}
# This should return the loaded asset, but the load call will be
# added to the queue to run in the Blender main thread, so
# at this time it will not return anything. The assets will be
# loaded in the next Blender cycle, so we use the options to
# set the transform, parent and assign the action, if there is one.
api.load(
loader,
reference,
namespace=instance_name,
options=options
)
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
self._process(libpath, asset, asset_group, None)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = asset_group.children
return asset_group.children
def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
actions = {}
for obj in asset_group.children:
obj_meta = obj.get(AVALON_PROPERTY)
if obj_meta.get('family') == 'rig':
rig = None
for child in obj.children:
if child.type == 'ARMATURE':
rig = child
break
if not rig:
raise Exception("No armature in the rig asset group.")
if rig.animation_data and rig.animation_data.action:
namespace = obj_meta.get('namespace')
actions[namespace] = rig.animation_data.action
mat = asset_group.matrix_basis.copy()
self._remove_animation_instances(asset_group)
self._remove(asset_group)
# _process takes the asset name as its second argument; reuse the name stored
# in the container metadata here (assumption: it is the closest value at hand).
self._process(str(libpath), container.get("asset_name"), asset_group, actions)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
if not asset_group:
return False
self._remove_animation_instances(asset_group)
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
return True
|
the-stack_0_11606 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of `tf.signal` functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
__all__ = [
'fft',
'fft2d',
'fft3d',
'ifft',
'ifft2d',
'ifft3d',
'irfft',
'irfft2d',
'irfft3d',
'rfft',
'rfft2d',
'rfft3d',
]
fft = utils.copy_docstring(
'tf.signal.fft',
lambda input, name=None: np.fft.fftn(input, axes=[-1]))
fft2d = utils.copy_docstring(
'tf.signal.fft2d',
lambda input, name=None: np.fft.fftn(input, axes=[-2, -1]))
fft3d = utils.copy_docstring(
'tf.signal.fft3d',
lambda input, name=None: np.fft.fftn(input, axes=[-3, -2, -1]))
ifft = utils.copy_docstring(
'tf.signal.ifft',
lambda input, name=None: np.fft.ifftn(input, axes=[-1]))
ifft2d = utils.copy_docstring(
'tf.signal.ifft2d',
lambda input, name=None: np.fft.ifftn(input, axes=[-2, -1]))
ifft3d = utils.copy_docstring(
'tf.signal.ifft3d',
lambda input, name=None: np.fft.ifftn(input, axes=[-3, -2, -1]))
rfft = utils.copy_docstring(
'tf.signal.rfft',
lambda input, fft_length=None, name=None: np.fft.rfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-1]))
rfft2d = utils.copy_docstring(
'tf.signal.rfft2d',
lambda input, fft_length=None, name=None: np.fft.rfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-2, -1]))
rfft3d = utils.copy_docstring(
'tf.signal.rfft3d',
lambda input, fft_length=None, name=None: np.fft.rfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-3, -2, -1]))
irfft = utils.copy_docstring(
'tf.signal.irfft',
lambda input, fft_length=None, name=None: np.fft.irfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-1]))
irfft2d = utils.copy_docstring(
'tf.signal.irfft2d',
lambda input, fft_length=None, name=None: np.fft.irfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-2, -1]))
irfft3d = utils.copy_docstring(
'tf.signal.irfft3d',
lambda input, fft_length=None, name=None: np.fft.irfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-3, -2, -1]))
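# Usage sketch (illustrative, not part of the original module): each wrapper
# mirrors the tf.signal function it is named after but dispatches to NumPy,
# e.g. fft2d(np.ones((4, 4), dtype=np.complex64)) is equivalent to
# np.fft.fftn(np.ones((4, 4), dtype=np.complex64), axes=[-2, -1]).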
|
the-stack_0_11607 | import re
import django.forms
def get_cleaned_text_file_content(uploaded_file):
"""Read uploaded file, try to fix up encoding to UTF-8 and
transform line endings into Unix style, then return the content as
a UTF-8 string. Errors are reported as
django.forms.ValidationError exceptions."""
if not uploaded_file:
return u""
if uploaded_file.size and uploaded_file.size > 10 * 1000 * 1000:
raise django.forms.ValidationError("Text file too large (size %s)." % uploaded_file.size)
content = "".join(uploaded_file.chunks())
# try to fixup encoding
import magic
if hasattr(magic, "open"):
m = magic.open(magic.MAGIC_MIME)
m.load()
filetype = m.buffer(content)
else:
m = magic.Magic()
m.cookie = magic.magic_open(magic.MAGIC_NONE | magic.MAGIC_MIME | magic.MAGIC_MIME_ENCODING)
magic.magic_load(m.cookie, None)
filetype = m.from_buffer(content)
if not filetype.startswith("text"):
raise django.forms.ValidationError("Uploaded file does not appear to be a text file.")
match = re.search("charset=([\w-]+)", filetype)
if not match:
raise django.forms.ValidationError("File has unknown encoding.")
encoding = match.group(1)
if "ascii" not in encoding:
try:
content = content.decode(encoding)
except Exception as e:
raise django.forms.ValidationError("Error decoding file (%s). Try submitting with UTF-8 encoding or remove non-ASCII characters." % str(e))
# turn line-endings into Unix style
content = content.replace("\r\n", "\n").replace("\r", "\n")
return content.encode("utf-8")
|
the-stack_0_11609 | #!/usr/bin/python3
import spidev
import time
tx_array = [0]*512
# Split an integer input into a two byte array to send via SPI
def write_pot(input):
print(input)
msb = input >> 8
lsb = input & 0xFF
print(spi.xfer([msb,lsb,msb,lsb]))
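# Illustrative split: write_pot(0x0555) yields msb 0x05 and lsb 0x55, so the
# payload passed to spi.xfer() is [0x05, 0x55, 0x05, 0x55] (the 16-bit word is
# sent twice per call).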
if __name__ == '__main__':
spi = spidev.SpiDev()
ret = spi.open(0, 0)
print("Spi.open = ", ret)
spi.max_speed_hz = 30000
spi.mode = 0
print("Started SPIDEV = ", spi)
data = 0x555
while True:
# print("Hello, I'm MMRPi-Hardware Energomera Library")
time.sleep(0.5)
data = data + 1
write_pot(data)
# break
|
the-stack_0_11610 |
safety_hotline_meta = {
'attributes': {
'primary': {
'field': 'description',
'name': 'Description',
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': 'date_created',
'date_granularity': 'year',
'default_date_filter': '2017',
'min_date': '2008',
'max_date': '2018'
},
}
crash_meta = {
'attributes': {
'primary': {
'field': 'crash_dt',
'name': 'Crash Date',
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': 'crash_dt',
'date_granularity': 'year',
'default_date_filter': '2014',
'min_date': '2004',
'max_date': '2014'
},
}
block_change_meta = {
'attributes': {
'primary': {
'field': 'stops_pct_change',
'name': 'Ridership Change from 2009 to 2017',
'visualization': {
'type': 'Text',
'comparison_value': None,
'comparison_name': None,
},
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': None,
'date_granularity': None,
'default_date_filter': '2017',
'min_date': None,
'max_date': None
},
}
route_change_meta = {
'attributes': {
'primary': {
'field': 'pct_change',
'name': 'Ridership Change from 2009 to 2017',
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': None,
'date_granularity': None,
'default_date_filter': '2017',
'min_date': None,
'max_date': None
},
}
sensors_meta = {
'attributes': {
'primary': {
'field': None,
'name': None,
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': None,
'date_granularity': None,
'default_date_filter': '2018',
'min_date': None,
'max_date': None
},
}
|
the-stack_0_11612 | #cfgfactory.py
import utility
import numpy.random
import cfg
import logging
class CFGFactory:
def __init__(self):
self.number_terminals = 100
self.number_nonterminals = 5
self.binary_rules = 40
self.lexical_rules = 100
self.strict_cnf = True
def generate_nonterminals(self):
nonterminals = [ 'S']
for i in range(1,self.number_nonterminals):
nonterminals.append("NT" + str(i))
return nonterminals
def sample_uniform(self, lp=0.5,bp = 0.5):
"""
Sample all productions Bernoulli for lexical and binary. Default 0.5.
"""
lexicon = list(utility.generate_lexicon(self.number_terminals))
#print("Lexicon",lexicon,self.number_terminals)
nonterminals = self.generate_nonterminals()
productions = []
for a in nonterminals:
for b in lexicon:
if numpy.random.random() < lp:
productions.append((a,b))
for a in nonterminals:
for b in nonterminals[1:]:
for c in nonterminals[1:]:
if numpy.random.random() < bp:
productions.append((a,b,c))
my_cfg = cfg.CFG()
my_cfg.start = nonterminals[0]
my_cfg.nonterminals = set(nonterminals)
my_cfg.terminals = set(lexicon)
my_cfg.productions = productions
return my_cfg
def sample_full(self):
lexicon = list(utility.generate_lexicon(self.number_terminals))
#print("Lexicon",lexicon,self.number_terminals)
nonterminals = self.generate_nonterminals()
lprods = set()
bprods= set()
for a in nonterminals:
for b in lexicon:
lprods.add((a,b))
for a in nonterminals:
for b in nonterminals[1:]:
for c in nonterminals[1:]:
bprods.add((a,b,c))
my_cfg = cfg.CFG()
my_cfg.start = nonterminals[0]
my_cfg.nonterminals = set(nonterminals)
my_cfg.terminals = set(lexicon)
my_cfg.productions = lprods | bprods
#print(my_cfg.terminals)
return my_cfg
def sample_trim(self):
"""
Sample one and then trim it.
return the trim one.
If empty, raise an exception.
"""
my_cfg = self.sample_raw()
#print([ prod for prod in my_cfg.productions if len(prod) == 2 and prod[0] == 'S'])
logging.info("CFG nominally has %d nonterminals, %d terminals, %d binary_rules and %d lexical rules", self.number_nonterminals,self.number_terminals,self.binary_rules,self.lexical_rules)
ts = my_cfg.compute_trim_set()
if len(ts) == 0:
# empty language
raise ValueError("Empty language")
prods = my_cfg.compute_usable_productions(ts)
terminals = set()
for prod in prods:
if len(prod) == 2:
terminals.add(prod[1])
tcfg = cfg.CFG()
tcfg.start = my_cfg.start
tcfg.terminals = terminals
tcfg.nonterminals = ts
tcfg.productions = set(prods)
logging.info("Final CFG has %d nonterminals, %d terminals, %d binary_rules and %d lexical rules",
len(tcfg.nonterminals), len(tcfg.terminals),
len([prod for prod in tcfg.productions if len(prod) == 3]),
len([prod for prod in tcfg.productions if len(prod) == 2]))
return tcfg
def sample_raw(self):
"""
return a CFG
"""
lexicon = list(utility.generate_lexicon(self.number_terminals))
#DEBUGGING
lexicon.sort()
print(lexicon[0],lexicon[-1])
nonterminals = self.generate_nonterminals()
lprods = set()
bprods= set()
lexicon_size = len(lexicon)
while len(lprods) < self.lexical_rules:
lhs = numpy.random.choice(nonterminals)
rhs = lexicon[numpy.random.choice(range(lexicon_size))]
lprods.add( (lhs,rhs))
print(lhs,rhs)
while len(bprods) < self.binary_rules:
if self.strict_cnf:
a = numpy.random.choice(nonterminals)
b,c = numpy.random.choice(nonterminals[1:],size=2)
else:
a,b,c = numpy.random.choice(nonterminals,size=3)
bprods.add( (a,b,c))
print(a,b,c)
my_cfg = cfg.CFG()
my_cfg.start = nonterminals[0]
my_cfg.nonterminals = set(nonterminals)
my_cfg.terminals = set(lexicon)
my_cfg.productions = lprods | bprods
return my_cfg
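# Usage sketch (illustrative, not part of the original module); it assumes the
# sibling utility/cfg modules imported above are importable and exercises the
# trimmed sampler on a small grammar.
if __name__ == "__main__":
    small_factory = CFGFactory()
    small_factory.number_nonterminals = 3
    small_factory.binary_rules = 10
    small_factory.lexical_rules = 20
    try:
        grammar = small_factory.sample_trim()
        print("trimmed grammar has", len(grammar.productions), "productions")
    except ValueError:
        print("sampled grammar generates the empty language")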
|
the-stack_0_11614 | from PIL import Image
class Painter:
def __init__(self, k, palette_name, color):
self.k = k
self.palette_name = palette_name
self.color = color
self.ctr = 0 # for frames
def format_frame(self,n):
return f"frames/{self.palette_name}-{self.k}-{n}.png"
def current_frame_name(self):
self.ctr += 1
return self.format_frame(self.ctr-1)
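# Illustrative naming: Painter(8, "viridis", colors).format_frame(3) returns
# "frames/viridis-8-3.png" (colors here stands for whatever palette mapping the
# caller supplies); current_frame_name() hands out consecutive frame numbers.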
def save_to_image(self, grid, filename):
k = self.k
color = self.color
n = len(grid) # assume grid is square, too lazy to generalize
wall = k//2
with Image.new('RGB',(k*n+2*wall,k*n+2*wall)) as painting:
for i in range(k*n+2*wall):
for j in range(wall):
painting.putpixel((i,j),(0,0,0))
painting.putpixel((i,k*n+2*wall-j-1),(0,0,0))
painting.putpixel((j,i),(0,0,0))
painting.putpixel((k*n+2*wall-j-1,i),(0,0,0))
for i in range(k*n):
for j in range(k*n):
painting.putpixel((i+wall,j+wall),color[grid[i//k][j//k]])
painting.save(filename,"PNG")
print(f"Created {filename}") |
the-stack_0_11615 | # This file is a part of OpenCV project.
# It is a subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html.
#
# Copyright (C) 2018, Intel Corporation, all rights reserved.
# Third party copyrights are property of their respective owners.
#
# Use this script to get the text graph representation (.pbtxt) of SSD-based
# deep learning network trained in TensorFlow Object Detection API.
# Then you can import it with a binary frozen graph (.pb) using readNetFromTensorflow() function.
# See details and examples on the following wiki page: https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API
import argparse
import re
from math import sqrt
from tf_text_graph_common import *
class SSDAnchorGenerator:
def __init__(self, min_scale, max_scale, num_layers, aspect_ratios,
reduce_boxes_in_lowest_layer, image_width, image_height):
self.min_scale = min_scale
self.aspect_ratios = aspect_ratios
self.reduce_boxes_in_lowest_layer = reduce_boxes_in_lowest_layer
self.image_width = image_width
self.image_height = image_height
self.scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
for i in range(num_layers)] + [1.0]
def get(self, layer_id):
if layer_id == 0 and self.reduce_boxes_in_lowest_layer:
widths = [0.1, self.min_scale * sqrt(2.0), self.min_scale * sqrt(0.5)]
heights = [0.1, self.min_scale / sqrt(2.0), self.min_scale / sqrt(0.5)]
else:
widths = [self.scales[layer_id] * sqrt(ar) for ar in self.aspect_ratios]
heights = [self.scales[layer_id] / sqrt(ar) for ar in self.aspect_ratios]
widths += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])]
heights += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])]
min_size = min(self.image_width, self.image_height)
widths = [w * min_size for w in widths]
heights = [h * min_size for h in heights]
return widths, heights
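# Illustrative scale schedule: with min_scale=0.2, max_scale=0.95 and
# num_layers=6 (common values in SSD configs), self.scales becomes
# [0.2, 0.35, 0.5, 0.65, 0.8, 0.95, 1.0]; the trailing 1.0 only feeds the
# sqrt(scale_k * scale_k+1) anchor of the last feature map.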
class MultiscaleAnchorGenerator:
def __init__(self, min_level, aspect_ratios, scales_per_octave, anchor_scale):
self.min_level = min_level
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.scales = [2**(float(s) / scales_per_octave) for s in range(scales_per_octave)]
def get(self, layer_id):
widths = []
heights = []
for a in self.aspect_ratios:
for s in self.scales:
base_anchor_size = 2**(self.min_level + layer_id) * self.anchor_scale
ar = sqrt(a)
heights.append(base_anchor_size * s / ar)
widths.append(base_anchor_size * s * ar)
return widths, heights
def createSSDGraph(modelPath, configPath, outputPath):
# Nodes that should be kept.
keepOps = ['Conv2D', 'BiasAdd', 'Add', 'AddV2', 'Relu', 'Relu6', 'Placeholder', 'FusedBatchNorm',
'DepthwiseConv2dNative', 'ConcatV2', 'Mul', 'MaxPool', 'AvgPool', 'Identity',
'Sub', 'ResizeNearestNeighbor', 'Pad', 'FusedBatchNormV3', 'Mean']
# Node with which prefixes should be removed
prefixesToRemove = ('MultipleGridAnchorGenerator/', 'Concatenate/', 'Postprocessor/', 'Preprocessor/map')
# Load a config file.
config = readTextMessage(configPath)
config = config['model'][0]['ssd'][0]
num_classes = int(config['num_classes'][0])
fixed_shape_resizer = config['image_resizer'][0]['fixed_shape_resizer'][0]
image_width = int(fixed_shape_resizer['width'][0])
image_height = int(fixed_shape_resizer['height'][0])
box_predictor = 'convolutional' if 'convolutional_box_predictor' in config['box_predictor'][0] else 'weight_shared_convolutional'
anchor_generator = config['anchor_generator'][0]
if 'ssd_anchor_generator' in anchor_generator:
ssd_anchor_generator = anchor_generator['ssd_anchor_generator'][0]
min_scale = float(ssd_anchor_generator['min_scale'][0])
max_scale = float(ssd_anchor_generator['max_scale'][0])
num_layers = int(ssd_anchor_generator['num_layers'][0])
aspect_ratios = [float(ar) for ar in ssd_anchor_generator['aspect_ratios']]
reduce_boxes_in_lowest_layer = True
if 'reduce_boxes_in_lowest_layer' in ssd_anchor_generator:
reduce_boxes_in_lowest_layer = ssd_anchor_generator['reduce_boxes_in_lowest_layer'][0] == 'true'
priors_generator = SSDAnchorGenerator(min_scale, max_scale, num_layers,
aspect_ratios, reduce_boxes_in_lowest_layer,
image_width, image_height)
print('Scale: [%f-%f]' % (min_scale, max_scale))
print('Aspect ratios: %s' % str(aspect_ratios))
print('Reduce boxes in the lowest layer: %s' % str(reduce_boxes_in_lowest_layer))
elif 'multiscale_anchor_generator' in anchor_generator:
multiscale_anchor_generator = anchor_generator['multiscale_anchor_generator'][0]
min_level = int(multiscale_anchor_generator['min_level'][0])
max_level = int(multiscale_anchor_generator['max_level'][0])
anchor_scale = float(multiscale_anchor_generator['anchor_scale'][0])
aspect_ratios = [float(ar) for ar in multiscale_anchor_generator['aspect_ratios']]
scales_per_octave = int(multiscale_anchor_generator['scales_per_octave'][0])
num_layers = max_level - min_level + 1
priors_generator = MultiscaleAnchorGenerator(min_level, aspect_ratios,
scales_per_octave, anchor_scale)
print('Levels: [%d-%d]' % (min_level, max_level))
print('Anchor scale: %f' % anchor_scale)
print('Scales per octave: %d' % scales_per_octave)
print('Aspect ratios: %s' % str(aspect_ratios))
else:
print('Unknown anchor_generator')
exit(0)
print('Number of classes: %d' % num_classes)
print('Number of layers: %d' % num_layers)
print('box predictor: %s' % box_predictor)
print('Input image size: %dx%d' % (image_width, image_height))
# Read the graph.
outNames = ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes']
writeTextGraph(modelPath, outputPath, outNames)
graph_def = parseTextGraph(outputPath)
def getUnconnectedNodes():
unconnected = []
for node in graph_def.node:
unconnected.append(node.name)
for inp in node.input:
if inp in unconnected:
unconnected.remove(inp)
return unconnected
def fuse_nodes(nodesToKeep):
# Detect unfused batch normalization nodes and fuse them.
# Add_0 <-- moving_variance, add_y
# Rsqrt <-- Add_0
# Mul_0 <-- Rsqrt, gamma
# Mul_1 <-- input, Mul_0
# Mul_2 <-- moving_mean, Mul_0
# Sub_0 <-- beta, Mul_2
# Add_1 <-- Mul_1, Sub_0
nodesMap = {node.name: node for node in graph_def.node}
subgraphBatchNorm = ['Add',
['Mul', 'input', ['Mul', ['Rsqrt', ['Add', 'moving_variance', 'add_y']], 'gamma']],
['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]]
subgraphBatchNormV2 = ['AddV2',
['Mul', 'input', ['Mul', ['Rsqrt', ['AddV2', 'moving_variance', 'add_y']], 'gamma']],
['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]]
# Detect unfused nearest neighbor resize.
subgraphResizeNN = ['Reshape',
['Mul', ['Reshape', 'input', ['Pack', 'shape_1', 'shape_2', 'shape_3', 'shape_4', 'shape_5']],
'ones'],
['Pack', ['StridedSlice', ['Shape', 'input'], 'stack', 'stack_1', 'stack_2'],
'out_height', 'out_width', 'out_channels']]
def checkSubgraph(node, targetNode, inputs, fusedNodes):
op = targetNode[0]
if node.op == op and (len(node.input) >= len(targetNode) - 1):
fusedNodes.append(node)
for i, inpOp in enumerate(targetNode[1:]):
if isinstance(inpOp, list):
if not node.input[i] in nodesMap or \
not checkSubgraph(nodesMap[node.input[i]], inpOp, inputs, fusedNodes):
return False
else:
inputs[inpOp] = node.input[i]
return True
else:
return False
nodesToRemove = []
for node in graph_def.node:
inputs = {}
fusedNodes = []
if checkSubgraph(node, subgraphBatchNorm, inputs, fusedNodes) or \
checkSubgraph(node, subgraphBatchNormV2, inputs, fusedNodes):
name = node.name
node.Clear()
node.name = name
node.op = 'FusedBatchNorm'
node.input.append(inputs['input'])
node.input.append(inputs['gamma'])
node.input.append(inputs['beta'])
node.input.append(inputs['moving_mean'])
node.input.append(inputs['moving_variance'])
node.addAttr('epsilon', 0.001)
nodesToRemove += fusedNodes[1:]
inputs = {}
fusedNodes = []
if checkSubgraph(node, subgraphResizeNN, inputs, fusedNodes):
name = node.name
node.Clear()
node.name = name
node.op = 'ResizeNearestNeighbor'
node.input.append(inputs['input'])
node.input.append(name + '/output_shape')
out_height_node = nodesMap[inputs['out_height']]
out_width_node = nodesMap[inputs['out_width']]
out_height = int(out_height_node.attr['value']['tensor'][0]['int_val'][0])
out_width = int(out_width_node.attr['value']['tensor'][0]['int_val'][0])
shapeNode = NodeDef()
shapeNode.name = name + '/output_shape'
shapeNode.op = 'Const'
shapeNode.addAttr('value', [out_height, out_width])
graph_def.node.insert(graph_def.node.index(node), shapeNode)
nodesToKeep.append(shapeNode.name)
nodesToRemove += fusedNodes[1:]
for node in nodesToRemove:
graph_def.node.remove(node)
nodesToKeep = []
fuse_nodes(nodesToKeep)
removeIdentity(graph_def)
def to_remove(name, op):
return (not name in nodesToKeep) and \
(op == 'Const' or (not op in keepOps) or name.startswith(prefixesToRemove))
removeUnusedNodesAndAttrs(to_remove, graph_def)
# Connect input node to the first layer
assert(graph_def.node[0].op == 'Placeholder')
try:
input_shape = graph_def.node[0].attr['shape']['shape'][0]['dim']
input_shape[1]['size'] = image_height
input_shape[2]['size'] = image_width
except:
print("Input shapes are undefined")
# assert(graph_def.node[1].op == 'Conv2D')
weights = graph_def.node[1].input[-1]
for i in range(len(graph_def.node[1].input)):
graph_def.node[1].input.pop()
graph_def.node[1].input.append(graph_def.node[0].name)
graph_def.node[1].input.append(weights)
# check and correct the case when preprocessing block is after input
preproc_id = "Preprocessor/"
if graph_def.node[2].name.startswith(preproc_id) and \
graph_def.node[2].input[0].startswith(preproc_id):
if not any(preproc_id in inp for inp in graph_def.node[3].input):
graph_def.node[3].input.insert(0, graph_def.node[2].name)
# Create SSD postprocessing head ###############################################
# Concatenate predictions of classes, predictions of bounding boxes and proposals.
def addConcatNode(name, inputs, axisNodeName):
concat = NodeDef()
concat.name = name
concat.op = 'ConcatV2'
for inp in inputs:
concat.input.append(inp)
concat.input.append(axisNodeName)
graph_def.node.extend([concat])
addConstNode('concat/axis_flatten', [-1], graph_def)
addConstNode('PriorBox/concat/axis', [-2], graph_def)
for label in ['ClassPredictor', 'BoxEncodingPredictor' if box_predictor == 'convolutional' else 'BoxPredictor']:
concatInputs = []
for i in range(num_layers):
# Flatten predictions
flatten = NodeDef()
if box_predictor == 'convolutional':
inpName = 'BoxPredictor_%d/%s/BiasAdd' % (i, label)
else:
if i == 0:
inpName = 'WeightSharedConvolutionalBoxPredictor/%s/BiasAdd' % label
else:
inpName = 'WeightSharedConvolutionalBoxPredictor_%d/%s/BiasAdd' % (i, label)
flatten.input.append(inpName)
flatten.name = inpName + '/Flatten'
flatten.op = 'Flatten'
concatInputs.append(flatten.name)
graph_def.node.extend([flatten])
addConcatNode('%s/concat' % label, concatInputs, 'concat/axis_flatten')
num_matched_layers = 0
for node in graph_def.node:
if re.match(r'BoxPredictor_\d/BoxEncodingPredictor/convolution', node.name) or \
re.match(r'BoxPredictor_\d/BoxEncodingPredictor/Conv2D', node.name) or \
re.match(r'WeightSharedConvolutionalBoxPredictor(_\d)*/BoxPredictor/Conv2D', node.name):
node.addAttr('loc_pred_transposed', True)
num_matched_layers += 1
assert(num_matched_layers == num_layers)
# Add layers that generate anchors (bounding boxes proposals).
priorBoxes = []
boxCoder = config['box_coder'][0]
fasterRcnnBoxCoder = boxCoder['faster_rcnn_box_coder'][0]
boxCoderVariance = [1.0/float(fasterRcnnBoxCoder['x_scale'][0]), 1.0/float(fasterRcnnBoxCoder['y_scale'][0]), 1.0/float(fasterRcnnBoxCoder['width_scale'][0]), 1.0/float(fasterRcnnBoxCoder['height_scale'][0])]
for i in range(num_layers):
priorBox = NodeDef()
priorBox.name = 'PriorBox_%d' % i
priorBox.op = 'PriorBox'
if box_predictor == 'convolutional':
priorBox.input.append('BoxPredictor_%d/BoxEncodingPredictor/BiasAdd' % i)
else:
if i == 0:
priorBox.input.append('WeightSharedConvolutionalBoxPredictor/BoxPredictor/Conv2D')
else:
priorBox.input.append('WeightSharedConvolutionalBoxPredictor_%d/BoxPredictor/BiasAdd' % i)
priorBox.input.append(graph_def.node[0].name) # image_tensor
priorBox.addAttr('flip', False)
priorBox.addAttr('clip', False)
widths, heights = priors_generator.get(i)
priorBox.addAttr('width', widths)
priorBox.addAttr('height', heights)
priorBox.addAttr('variance', boxCoderVariance)
graph_def.node.extend([priorBox])
priorBoxes.append(priorBox.name)
# Compare this layer's output with Postprocessor/Reshape
addConcatNode('PriorBox/concat', priorBoxes, 'concat/axis_flatten')
# Sigmoid for classes predictions and DetectionOutput layer
addReshape('ClassPredictor/concat', 'ClassPredictor/concat3d', [0, -1, num_classes + 1], graph_def)
sigmoid = NodeDef()
sigmoid.name = 'ClassPredictor/concat/sigmoid'
sigmoid.op = 'Sigmoid'
sigmoid.input.append('ClassPredictor/concat3d')
graph_def.node.extend([sigmoid])
addFlatten(sigmoid.name, sigmoid.name + '/Flatten', graph_def)
detectionOut = NodeDef()
detectionOut.name = 'detection_out'
detectionOut.op = 'DetectionOutput'
if box_predictor == 'convolutional':
detectionOut.input.append('BoxEncodingPredictor/concat')
else:
detectionOut.input.append('BoxPredictor/concat')
detectionOut.input.append(sigmoid.name + '/Flatten')
detectionOut.input.append('PriorBox/concat')
detectionOut.addAttr('num_classes', num_classes + 1)
detectionOut.addAttr('share_location', True)
detectionOut.addAttr('background_label_id', 0)
postProcessing = config['post_processing'][0]
batchNMS = postProcessing['batch_non_max_suppression'][0]
if 'iou_threshold' in batchNMS:
detectionOut.addAttr('nms_threshold', float(batchNMS['iou_threshold'][0]))
else:
detectionOut.addAttr('nms_threshold', 0.6)
if 'score_threshold' in batchNMS:
detectionOut.addAttr('confidence_threshold', float(batchNMS['score_threshold'][0]))
else:
detectionOut.addAttr('confidence_threshold', 0.01)
if 'max_detections_per_class' in batchNMS:
detectionOut.addAttr('top_k', int(batchNMS['max_detections_per_class'][0]))
else:
detectionOut.addAttr('top_k', 100)
if 'max_total_detections' in batchNMS:
detectionOut.addAttr('keep_top_k', int(batchNMS['max_total_detections'][0]))
else:
detectionOut.addAttr('keep_top_k', 100)
detectionOut.addAttr('code_type', "CENTER_SIZE")
graph_def.node.extend([detectionOut])
while True:
unconnectedNodes = getUnconnectedNodes()
unconnectedNodes.remove(detectionOut.name)
if not unconnectedNodes:
break
for name in unconnectedNodes:
for i in range(len(graph_def.node)):
if graph_def.node[i].name == name:
del graph_def.node[i]
break
# Save as text.
graph_def.save(outputPath)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run this script to get a text graph of '
'SSD model from TensorFlow Object Detection API. '
'Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
parser.add_argument('--output', required=True, help='Path to output text graph.')
parser.add_argument('--config', required=True, help='Path to a *.config file is used for training.')
args = parser.parse_args()
createSSDGraph(args.input, args.config, args.output)
|
the-stack_0_11616 | import datetime
import os
import time
from uuid import UUID, uuid4
from django import forms as django_forms, http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.db.models import Count
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
from django.utils.http import is_safe_url
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
import waffle
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.views import API_TOKEN_COOKIE, logout_user
from olympia.activity.models import ActivityLog, VersionLog
from olympia.activity.utils import log_and_notify
from olympia.addons.models import (
Addon, AddonReviewerFlags, AddonUser, AddonUserPendingConfirmation)
from olympia.addons.views import BaseFilter
from olympia.amo import messages, utils as amo_utils
from olympia.amo.decorators import json_view, login_required, post_required
from olympia.amo.templatetags.jinja_helpers import absolutify, urlparams
from olympia.amo.urlresolvers import get_url_prefix, reverse
from olympia.amo.utils import MenuItem, escape_all, render, send_mail
from olympia.api.models import APIKey, APIKeyConfirmation
from olympia.devhub.decorators import dev_required, no_admin_disabled
from olympia.devhub.models import BlogPost, RssKey
from olympia.devhub.utils import (
add_dynamic_theme_tag, extract_theme_properties,
fetch_existing_translations_from_addon, get_addon_akismet_reports,
UploadRestrictionChecker, wizard_unsupported_properties)
from olympia.files.models import File, FileUpload, FileValidation
from olympia.files.utils import parse_addon
from olympia.lib.crypto.signing import sign_file
from olympia.reviewers.forms import PublicWhiteboardForm
from olympia.reviewers.models import Whiteboard
from olympia.reviewers.templatetags.jinja_helpers import get_position
from olympia.reviewers.utils import ReviewHelper
from olympia.users.models import DeveloperAgreementRestriction
from olympia.versions.models import Version
from olympia.versions.tasks import extract_version_source_to_git
from olympia.versions.utils import get_next_version_number
from olympia.zadmin.models import get_config
from . import feeds, forms, signals, tasks
log = olympia.core.logger.getLogger('z.devhub')
# We use a session cookie to make sure people see the dev agreement.
MDN_BASE = 'https://developer.mozilla.org/en-US/Add-ons'
def get_fileupload_by_uuid_or_404(value):
try:
UUID(value)
except ValueError:
raise http.Http404()
return get_object_or_404(FileUpload, uuid=value)
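# Illustrative behaviour: a malformed identifier such as 'not-a-uuid' fails the
# UUID() check and raises Http404 before the database is queried; a well-formed
# UUID that matches no FileUpload row also results in a 404.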
class AddonFilter(BaseFilter):
opts = (('updated', _(u'Updated')),
('name', _(u'Name')),
('created', _(u'Created')),
('popular', _(u'Downloads')),
('rating', _(u'Rating')))
class ThemeFilter(BaseFilter):
opts = (('created', _(u'Created')),
('name', _(u'Name')),
('popular', _(u'Downloads')),
('rating', _(u'Rating')))
def addon_listing(request, theme=False):
"""Set up the queryset and filtering for addon listing for Dashboard."""
if theme:
qs = Addon.objects.filter(
authors=request.user, type=amo.ADDON_STATICTHEME)
filter_cls = ThemeFilter
default = 'created'
else:
qs = Addon.objects.filter(authors=request.user).exclude(
type=amo.ADDON_STATICTHEME)
filter_cls = AddonFilter
default = 'updated'
filter_ = filter_cls(request, qs, 'sort', default)
return filter_.qs, filter_
def index(request):
ctx = {'blog_posts': _get_posts()}
if request.user.is_authenticated:
user_addons = Addon.objects.filter(authors=request.user)
recent_addons = user_addons.order_by('-modified')[:3]
ctx['recent_addons'] = []
for addon in recent_addons:
ctx['recent_addons'].append({'addon': addon,
'position': get_position(addon)})
return render(request, 'devhub/index.html', ctx)
@login_required
def dashboard(request, theme=False):
addon_items = _get_items(
None, Addon.objects.filter(authors=request.user))[:4]
data = dict(rss=_get_rss_feed(request), blog_posts=_get_posts(),
timestamp=int(time.time()), addon_tab=not theme,
theme=theme, addon_items=addon_items)
if data['addon_tab']:
addons, data['filter'] = addon_listing(request)
data['addons'] = amo_utils.paginate(request, addons, per_page=10)
if theme:
themes, data['filter'] = addon_listing(request, theme=True)
data['themes'] = amo_utils.paginate(request, themes, per_page=10)
if 'filter' in data:
data['sorting'] = data['filter'].field
data['sort_opts'] = data['filter'].opts
return render(request, 'devhub/addons/dashboard.html', data)
@dev_required
def ajax_compat_status(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_status.html',
dict(addon=addon))
@dev_required
def ajax_compat_error(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_error.html',
dict(addon=addon))
@dev_required
def ajax_compat_update(request, addon_id, addon, version_id):
if not addon.accepts_compatible_apps():
raise http.Http404()
version = get_object_or_404(addon.versions.all(), pk=version_id)
compat_form = forms.CompatFormSet(
request.POST or None,
queryset=version.apps.all().select_related('min', 'max'),
form_kwargs={'version': version})
if request.method == 'POST' and compat_form.is_valid():
for compat in compat_form.save(commit=False):
compat.version = version
compat.save()
for compat in compat_form.deleted_objects:
compat.delete()
for form in compat_form.forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
return render(request, 'devhub/addons/ajax_compat_update.html',
dict(addon=addon, version=version, compat_form=compat_form))
def _get_addons(request, addons, addon_id, action):
"""Create a list of ``MenuItem``s for the activity feed."""
items = []
a = MenuItem()
a.selected = (not addon_id)
(a.text, a.url) = (ugettext('All My Add-ons'), reverse('devhub.feed_all'))
if action:
a.url += '?action=' + action
items.append(a)
for addon in addons:
item = MenuItem()
try:
item.selected = (addon_id and addon.id == int(addon_id))
except ValueError:
pass # We won't get here... EVER
url = reverse('devhub.feed', args=[addon.slug])
if action:
url += '?action=' + action
item.text, item.url = addon.name, url
items.append(item)
return items
def _get_posts(limit=5):
return BlogPost.objects.order_by('-date_posted')[0:limit]
def _get_activities(request, action):
url = request.get_full_path()
choices = (None, 'updates', 'status', 'collections', 'reviews')
text = {None: ugettext('All Activity'),
'updates': ugettext('Add-on Updates'),
'status': ugettext('Add-on Status'),
'collections': ugettext('User Collections'),
'reviews': ugettext('User Reviews'),
}
items = []
for c in choices:
i = MenuItem()
i.text = text[c]
i.url, i.selected = urlparams(url, page=None, action=c), (action == c)
items.append(i)
return items
def _get_items(action, addons):
filters = {
'updates': (amo.LOG.ADD_VERSION, amo.LOG.ADD_FILE_TO_VERSION),
'status': (amo.LOG.USER_DISABLE, amo.LOG.USER_ENABLE,
amo.LOG.CHANGE_STATUS, amo.LOG.APPROVE_VERSION,),
'collections': (amo.LOG.ADD_TO_COLLECTION,
amo.LOG.REMOVE_FROM_COLLECTION,),
'reviews': (amo.LOG.ADD_RATING,)
}
filter_ = filters.get(action)
items = (ActivityLog.objects.for_addons(addons)
.exclude(action__in=amo.LOG_HIDE_DEVELOPER))
if filter_:
items = items.filter(action__in=[i.id for i in filter_])
return items
def _get_rss_feed(request):
key, _ = RssKey.objects.get_or_create(user=request.user)
return urlparams(reverse('devhub.feed_all'), privaterss=key.key.hex)
def feed(request, addon_id=None):
if request.GET.get('privaterss'):
return feeds.ActivityFeedRSS()(request)
addon_selected = None
if not request.user.is_authenticated:
return redirect_for_login(request)
else:
addons_all = Addon.objects.filter(authors=request.user)
if addon_id:
addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
addons = addon # common query set
try:
key = RssKey.objects.get(addon=addons)
except RssKey.DoesNotExist:
key = RssKey.objects.create(addon=addons)
addon_selected = addon.id
rssurl = urlparams(reverse('devhub.feed', args=[addon_id]),
privaterss=key.key.hex)
if not acl.check_addon_ownership(request, addons, dev=True,
ignore_disabled=True):
raise PermissionDenied
else:
rssurl = _get_rss_feed(request)
addon = None
addons = addons_all
action = request.GET.get('action')
items = _get_items(action, addons)
activities = _get_activities(request, action)
addon_items = _get_addons(request, addons_all, addon_selected, action)
pager = amo_utils.paginate(request, items, 20)
data = dict(addons=addon_items, pager=pager, activities=activities,
rss=rssurl, addon=addon)
return render(request, 'devhub/addons/activity.html', data)
@dev_required
def edit(request, addon_id, addon):
try:
whiteboard = Whiteboard.objects.get(pk=addon.pk)
except Whiteboard.DoesNotExist:
whiteboard = Whiteboard(pk=addon.pk)
previews = (
addon.current_version.previews.all()
if addon.current_version and addon.has_per_version_previews
else addon.previews.all())
header_preview = (
previews.first() if addon.type == amo.ADDON_STATICTHEME else None)
data = {
'page': 'edit',
'addon': addon,
'whiteboard': whiteboard,
'editable': False,
'show_listed_fields': addon.has_listed_versions(),
'valid_slug': addon.slug,
'tags': addon.tags.not_denied().values_list('tag_text', flat=True),
'previews': previews,
'header_preview': header_preview,
'supported_image_types': amo.SUPPORTED_IMAGE_TYPES,
}
return render(request, 'devhub/addons/edit.html', data)
@dev_required(owner_for_post=True)
@post_required
def delete(request, addon_id, addon):
# Database deletes only allowed for free or incomplete addons.
if not addon.can_be_deleted():
msg = ugettext(
'Add-on cannot be deleted. Disable this add-on instead.')
messages.error(request, msg)
return redirect(addon.get_dev_url('versions'))
any_theme = addon.type == amo.ADDON_STATICTHEME
form = forms.DeleteForm(request.POST, addon=addon)
if form.is_valid():
reason = form.cleaned_data.get('reason', '')
addon.delete(msg='Removed via devhub', reason=reason)
messages.success(
request,
ugettext('Theme deleted.')
if any_theme else ugettext('Add-on deleted.'))
return redirect('devhub.%s' % ('themes' if any_theme else 'addons'))
else:
messages.error(
request,
ugettext('URL name was incorrect. Theme was not deleted.')
if any_theme else
ugettext('URL name was incorrect. Add-on was not deleted.'))
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def enable(request, addon_id, addon):
addon.update(disabled_by_user=False)
ActivityLog.create(amo.LOG.USER_ENABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True)
@post_required
def cancel(request, addon_id, addon):
if addon.status == amo.STATUS_NOMINATED:
addon.update(status=amo.STATUS_NULL)
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon, addon.status)
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if latest_version:
for file_ in latest_version.files.filter(
status=amo.STATUS_AWAITING_REVIEW):
file_.update(status=amo.STATUS_DISABLED)
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def disable(request, addon_id, addon):
# Also set the latest listed version to STATUS_DISABLED if it was
# AWAITING_REVIEW, to not waste reviewers time.
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if latest_version:
latest_version.files.filter(
status=amo.STATUS_AWAITING_REVIEW).update(
status=amo.STATUS_DISABLED)
addon.update_version()
addon.update_status()
addon.update(disabled_by_user=True)
ActivityLog.create(amo.LOG.USER_DISABLE, addon)
return redirect(addon.get_dev_url('versions'))
# Can't use @dev_required, as the user is not a developer yet. Can't use
# @addon_view_factory either, because it requires a developer for unlisted
# add-ons. So we just @login_required and retrieve the addon ourselves in the
# function.
@login_required
def invitation(request, addon_id):
addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
try:
invitation = AddonUserPendingConfirmation.objects.get(
addon=addon, user=request.user)
except AddonUserPendingConfirmation.DoesNotExist:
# To be nice in case the user accidentally visited this page after
# having accepted an invite, redirect to the add-on base edit page.
# If they are an author, they will have access, otherwise will get the
# appropriate error.
return redirect(addon.get_dev_url())
if request.method == 'POST':
value = request.POST.get('accept')
if value == 'yes':
# There is a potential race condition on the position, but it's
# difficult to find a sensible value anyway. Should a position
# conflict happen, owners can easily fix it themselves.
last_position = AddonUser.objects.filter(
addon=invitation.addon).order_by('position').values_list(
'position', flat=True).last() or 0
AddonUser.objects.create(
addon=invitation.addon, user=invitation.user,
role=invitation.role, listed=invitation.listed,
position=last_position + 1)
messages.success(request, ugettext('Invitation accepted.'))
redirect_url = addon.get_dev_url()
else:
messages.success(request, ugettext('Invitation declined.'))
redirect_url = reverse('devhub.addons')
# Regardless of whether or not the invitation was accepted or not,
# it's now obsolete.
invitation.delete()
return redirect(redirect_url)
ctx = {
'addon': addon,
'invitation': invitation,
}
return render(request, 'devhub/addons/invitation.html', ctx)
@dev_required(owner_for_post=True)
def ownership(request, addon_id, addon):
fs = []
ctx = {'addon': addon}
post_data = request.POST if request.method == 'POST' else None
# Authors.
user_form = forms.AuthorFormSet(
post_data,
prefix='user_form',
queryset=AddonUser.objects.filter(addon=addon).order_by('position'),
form_kwargs={'addon': addon})
fs.append(user_form)
ctx['user_form'] = user_form
# Authors pending confirmation (owner can still remove them before they
# accept).
authors_pending_confirmation_form = forms.AuthorWaitingConfirmationFormSet(
post_data,
prefix='authors_pending_confirmation',
queryset=AddonUserPendingConfirmation.objects.filter(
addon=addon).order_by('id'),
form_kwargs={'addon': addon})
fs.append(authors_pending_confirmation_form)
ctx['authors_pending_confirmation_form'] = (
authors_pending_confirmation_form)
# Versions.
license_form = forms.LicenseForm(post_data, version=addon.current_version)
ctx.update(license_form.get_context())
if ctx['license_form']: # if addon has a version
fs.append(ctx['license_form'])
# Policy.
if addon.type != amo.ADDON_STATICTHEME:
policy_form = forms.PolicyForm(post_data, addon=addon)
ctx['policy_form'] = policy_form
fs.append(policy_form)
else:
policy_form = None
def mail_user_changes(author, title, template_part, recipients,
extra_context=None):
from olympia.amo.utils import send_mail
context_data = {
'author': author,
'addon': addon,
'DOMAIN': settings.DOMAIN,
}
if extra_context:
context_data.update(extra_context)
template = loader.get_template(
'users/email/{part}.ltxt'.format(part=template_part))
send_mail(title, template.render(context_data),
None, recipients, use_deny_list=False)
def process_author_changes(source_form, existing_authors_emails):
addon_users_to_process = source_form.save(commit=False)
for addon_user in addon_users_to_process:
action = None
addon_user.addon = addon
if not addon_user.pk:
action = amo.LOG.ADD_USER_WITH_ROLE
mail_user_changes(
author=addon_user,
title=ugettext('An author has been added to your add-on'),
template_part='author_added',
recipients=existing_authors_emails)
mail_user_changes(
author=addon_user,
title=ugettext(
'Author invitation for {addon_name}').format(
addon_name=str(addon.name)),
template_part='author_added_confirmation',
recipients=[addon_user.user.email],
extra_context={'author_confirmation_link': absolutify(
reverse('devhub.addons.invitation', args=(addon.slug,))
)})
messages.success(request, ugettext(
'A confirmation email has been sent to {email}').format(
email=addon_user.user.email))
elif addon_user.role != addon_user._original_role:
action = amo.LOG.CHANGE_USER_WITH_ROLE
title = ugettext(
'An author role has been changed on your add-on')
recipients = list(
set(existing_authors_emails + [addon_user.user.email])
)
mail_user_changes(
author=addon_user,
title=title,
template_part='author_changed',
recipients=recipients)
addon_user.save()
if action:
ActivityLog.create(
action, addon_user.user,
str(addon_user.get_role_display()), addon)
for addon_user in source_form.deleted_objects:
recipients = list(
set(existing_authors_emails + [addon_user.user.email])
)
ActivityLog.create(
amo.LOG.REMOVE_USER_WITH_ROLE, addon_user.user,
str(addon_user.get_role_display()), addon)
mail_user_changes(
author=addon_user,
title=ugettext('An author has been removed from your add-on'),
template_part='author_removed',
recipients=recipients)
addon_user.delete()
if request.method == 'POST' and all([form.is_valid() for form in fs]):
if license_form in fs:
license_form.save()
if policy_form and policy_form in fs:
policy_form.save()
messages.success(request, ugettext('Changes successfully saved.'))
existing_authors_emails = list(
addon.authors.values_list('email', flat=True))
process_author_changes(
authors_pending_confirmation_form, existing_authors_emails)
process_author_changes(
user_form, existing_authors_emails)
return redirect(addon.get_dev_url('owner'))
return render(request, 'devhub/addons/owner.html', ctx)
@login_required
def validate_addon(request):
return render(request, 'devhub/validate_addon.html',
{'title': ugettext('Validate Add-on'),
'new_addon_form': forms.DistributionChoiceForm()})
def handle_upload(filedata, request, channel, addon=None, is_standalone=False,
submit=False):
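    # Create a FileUpload from the submitted file, optionally queue an
    # Akismet comment check for listed uploads, then trigger validation
    # (and submission, when `submit` is True).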
automated_signing = channel == amo.RELEASE_CHANNEL_UNLISTED
user = request.user if request.user.is_authenticated else None
upload = FileUpload.from_post(
filedata, filedata.name, filedata.size,
automated_signing=automated_signing, addon=addon, user=user)
log.info('FileUpload created: %s' % upload.uuid.hex)
    from olympia.lib.akismet.tasks import akismet_comment_check  # avoid circular import
if (channel == amo.RELEASE_CHANNEL_LISTED):
existing_data = (
fetch_existing_translations_from_addon(
upload.addon, ('name', 'summary', 'description'))
if addon and addon.has_listed_versions() else ())
akismet_reports = get_addon_akismet_reports(
user=user,
user_agent=request.META.get('HTTP_USER_AGENT'),
referrer=request.META.get('HTTP_REFERER'),
upload=upload,
existing_data=existing_data)
else:
akismet_reports = []
if akismet_reports:
pretask = akismet_comment_check.si(
[report.id for _, report in akismet_reports])
else:
pretask = None
if submit:
tasks.validate_and_submit(
addon, upload, channel=channel, pretask=pretask)
else:
tasks.validate(
upload, listed=(channel == amo.RELEASE_CHANNEL_LISTED),
pretask=pretask)
return upload
@login_required
@post_required
def upload(request, channel='listed', addon=None, is_standalone=False):
channel = amo.CHANNEL_CHOICES_LOOKUP[channel]
filedata = request.FILES['upload']
upload = handle_upload(
filedata=filedata, request=request, addon=addon,
is_standalone=is_standalone, channel=channel)
if addon:
return redirect('devhub.upload_detail_for_version',
addon.slug, upload.uuid.hex)
elif is_standalone:
return redirect('devhub.standalone_upload_detail', upload.uuid.hex)
else:
return redirect('devhub.upload_detail', upload.uuid.hex, 'json')
@post_required
@dev_required
def upload_for_version(request, addon_id, addon, channel):
return upload(request, channel=channel, addon=addon)
@login_required
@json_view
def standalone_upload_detail(request, uuid):
upload = get_fileupload_by_uuid_or_404(uuid)
url = reverse('devhub.standalone_upload_detail', args=[uuid])
return upload_validation_context(request, upload, url=url)
@dev_required(submitting=True)
@json_view
def upload_detail_for_version(request, addon_id, addon, uuid):
try:
upload = get_fileupload_by_uuid_or_404(uuid)
response = json_upload_detail(request, upload, addon_slug=addon.slug)
statsd.incr('devhub.upload_detail_for_addon.success')
return response
except Exception as exc:
statsd.incr('devhub.upload_detail_for_addon.error')
log.error('Error checking upload status: {} {}'.format(type(exc), exc))
raise
@dev_required(allow_reviewers=True)
def file_validation(request, addon_id, addon, file_id):
file_ = get_object_or_404(File, version__addon=addon, id=file_id)
validate_url = reverse('devhub.json_file_validation',
args=[addon.slug, file_.id])
file_url = reverse('files.list', args=[file_.id, 'file', ''])
context = {'validate_url': validate_url, 'file_url': file_url,
'file': file_, 'filename': file_.filename,
'timestamp': file_.created, 'addon': addon,
'automated_signing': file_.automated_signing}
if file_.has_been_validated:
context['validation_data'] = file_.validation.processed_validation
return render(request, 'devhub/validation.html', context)
@csrf_exempt
@dev_required(allow_reviewers=True)
def json_file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, version__addon=addon, id=file_id)
try:
result = file.validation
except FileValidation.DoesNotExist:
if request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
# This API is, unfortunately, synchronous, so wait for the
# task to complete and return the result directly.
pk = tasks.validate(file, synchronous=True).get()
result = FileValidation.objects.get(pk=pk)
response = JsonResponse({
'validation': result.processed_validation,
'error': None,
})
# See: https://github.com/mozilla/addons-server/issues/11048
response['Access-Control-Allow-Origin'] = settings.CODE_MANAGER_URL
response['Access-Control-Allow-Methods'] = 'GET, OPTIONS'
response['Access-Control-Allow-Headers'] = 'Content-Type'
response['Access-Control-Allow-Credentials'] = 'true'
return response
@json_view
def json_upload_detail(request, upload, addon_slug=None):
addon = None
if addon_slug:
addon = get_object_or_404(Addon.objects, slug=addon_slug)
result = upload_validation_context(request, upload, addon=addon)
if result['validation']:
try:
pkg = parse_addon(upload, addon=addon, user=request.user)
except django_forms.ValidationError as exc:
# Don't add custom validation errors if we already
# failed validation (This can happen because validation does
# call `parse_addon` too.)
if result['validation'].get('errors', 0):
return result
# This doesn't guard against client-side tinkering, and is purely
# to display those non-linter errors nicely in the frontend. What
# does prevent clients from bypassing those is the fact that we
# always call parse_addon() before calling from_upload(), so
# ValidationError would be raised before proceeding.
for i, msg in enumerate(exc.messages):
# Simulate a validation error so the UI displays
# it as such
result['validation']['messages'].insert(
i, {'type': 'error',
'message': escape_all(msg), 'tier': 1,
'fatal': True})
if result['validation']['ending_tier'] < 1:
result['validation']['ending_tier'] = 1
result['validation']['errors'] += 1
return json_view.error(result)
else:
result['addon_type'] = pkg.get('type', '')
return result
def upload_validation_context(request, upload, addon=None, url=None):
if not url:
if addon:
url = reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex])
else:
url = reverse(
'devhub.upload_detail',
args=[upload.uuid.hex, 'json'])
full_report_url = reverse('devhub.upload_detail', args=[upload.uuid.hex])
validation = upload.processed_validation or ''
return {'upload': upload.uuid.hex,
'validation': validation,
'error': None,
'url': url,
'full_report_url': full_report_url}
def upload_detail(request, uuid, format='html'):
upload = get_fileupload_by_uuid_or_404(uuid)
if upload.user_id and not request.user.is_authenticated:
return redirect_for_login(request)
if format == 'json' or request.is_ajax():
try:
response = json_upload_detail(request, upload)
statsd.incr('devhub.upload_detail.success')
return response
except Exception as exc:
statsd.incr('devhub.upload_detail.error')
log.error('Error checking upload status: {} {}'.format(
type(exc), exc))
raise
validate_url = reverse('devhub.standalone_upload_detail',
args=[upload.uuid.hex])
context = {'validate_url': validate_url, 'filename': upload.pretty_name,
'automated_signing': upload.automated_signing,
'timestamp': upload.created}
if upload.validation:
context['validation_data'] = upload.processed_validation
return render(request, 'devhub/validation.html', context)
@dev_required
def addons_section(request, addon_id, addon, section, editable=False):
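    # Render (and, when `editable`, save) a single section of the add-on
    # edit page: 'describe', 'additional_details', 'media' or 'technical'.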
show_listed = addon.has_listed_versions()
static_theme = addon.type == amo.ADDON_STATICTHEME
models = {}
content_waffle = waffle.switch_is_active('content-optimization')
if show_listed:
models.update({
'describe': (forms.DescribeForm if not content_waffle
else forms.DescribeFormContentOptimization),
'additional_details': forms.AdditionalDetailsForm,
'technical': forms.AddonFormTechnical
})
if not static_theme:
models.update({'media': forms.AddonFormMedia})
else:
models.update({
'describe': (forms.DescribeFormUnlisted if not content_waffle
else forms.DescribeFormUnlistedContentOptimization),
'additional_details': forms.AdditionalDetailsFormUnlisted,
'technical': forms.AddonFormTechnicalUnlisted
})
if section not in models:
raise http.Http404()
tags, previews, restricted_tags = [], [], []
cat_form = dependency_form = whiteboard_form = None
whiteboard = None
if section == 'describe' and show_listed:
category_form_class = (forms.SingleCategoryForm if static_theme else
forms.CategoryFormSet)
cat_form = category_form_class(
request.POST or None, addon=addon, request=request)
elif section == 'additional_details' and show_listed:
tags = addon.tags.not_denied().values_list('tag_text', flat=True)
restricted_tags = addon.tags.filter(restricted=True)
elif section == 'media':
previews = forms.PreviewFormSet(
request.POST or None,
prefix='files', queryset=addon.previews.all())
if section == 'technical':
try:
whiteboard = Whiteboard.objects.get(pk=addon.pk)
except Whiteboard.DoesNotExist:
whiteboard = Whiteboard(pk=addon.pk)
whiteboard_form = PublicWhiteboardForm(request.POST or None,
instance=whiteboard,
prefix='whiteboard')
    # Remember the current slug before the form replaces it with the
    # submitted form data.
valid_slug = addon.slug
if editable:
if request.method == 'POST':
form = models[section](request.POST, request.FILES,
instance=addon, request=request)
if form.is_valid() and (not previews or previews.is_valid()):
addon = form.save(addon)
if previews:
for preview in previews.forms:
preview.save(addon)
editable = False
if section == 'media':
ActivityLog.create(amo.LOG.CHANGE_ICON, addon)
else:
ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
valid_slug = addon.slug
if cat_form:
if cat_form.is_valid():
cat_form.save()
else:
editable = True
if dependency_form:
if dependency_form.is_valid():
dependency_form.save()
else:
editable = True
if whiteboard_form:
if whiteboard_form.is_valid():
whiteboard_form.save()
else:
editable = True
else:
form = models[section](instance=addon, request=request)
else:
form = False
data = {
'addon': addon,
'whiteboard': whiteboard,
'show_listed_fields': show_listed,
'form': form,
'editable': editable,
'tags': tags,
'restricted_tags': restricted_tags,
'cat_form': cat_form,
'preview_form': previews,
'dependency_form': dependency_form,
'whiteboard_form': whiteboard_form,
'valid_slug': valid_slug,
'supported_image_types': amo.SUPPORTED_IMAGE_TYPES,
}
return render(request, 'devhub/addons/edit/%s.html' % section, data)
@never_cache
@dev_required
@json_view
def image_status(request, addon_id, addon):
# Default icon needs no checking.
if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon':
icons = True
else:
icons = storage.exists(os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id))
previews = all(storage.exists(p.thumbnail_path)
for p in addon.previews.all())
return {'overall': icons and previews,
'icons': icons,
'previews': previews}
@dev_required
@json_view
def upload_image(request, addon_id, addon, upload_type):
errors = []
upload_hash = ''
if 'upload_image' in request.FILES:
upload_preview = request.FILES['upload_image']
upload_preview.seek(0)
upload_hash = uuid4().hex
loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash)
with storage.open(loc, 'wb') as fd:
for chunk in upload_preview:
fd.write(chunk)
is_icon = upload_type == 'icon'
is_preview = upload_type == 'preview'
image_check = amo_utils.ImageCheck(upload_preview)
is_animated = image_check.is_animated() # will also cache .is_image()
if (upload_preview.content_type not in amo.IMG_TYPES or
not image_check.is_image()):
if is_icon:
errors.append(ugettext('Icons must be either PNG or JPG.'))
else:
errors.append(ugettext('Images must be either PNG or JPG.'))
if is_animated:
if is_icon:
errors.append(ugettext('Icons cannot be animated.'))
else:
errors.append(ugettext('Images cannot be animated.'))
if is_icon:
max_size = settings.MAX_ICON_UPLOAD_SIZE
else:
max_size = None
if max_size and upload_preview.size > max_size:
if is_icon:
errors.append(
ugettext('Please use images smaller than %dMB.')
% (max_size // 1024 // 1024))
content_waffle = waffle.switch_is_active('content-optimization')
if image_check.is_image() and content_waffle and is_preview:
min_size = amo.ADDON_PREVIEW_SIZES.get('min')
# * 100 to get a nice integer to compare against rather than 1.3333
required_ratio = min_size[0] * 100 // min_size[1]
actual_size = image_check.size
actual_ratio = actual_size[0] * 100 // actual_size[1]
if actual_size[0] < min_size[0] or actual_size[1] < min_size[1]:
# L10n: {0} is an image width (in pixels), {1} is a height.
errors.append(
ugettext('Image must be at least {0} pixels wide and {1} '
'pixels tall.').format(min_size[0], min_size[1]))
if actual_ratio != required_ratio:
errors.append(
ugettext('Image dimensions must be in the ratio 4:3.'))
if image_check.is_image() and content_waffle and is_icon:
standard_size = amo.ADDON_ICON_SIZES[-1]
icon_size = image_check.size
if icon_size[0] < standard_size or icon_size[1] < standard_size:
# L10n: {0} is an image width/height (in pixels).
errors.append(
ugettext(u'Icon must be at least {0} pixels wide and '
u'tall.').format(standard_size))
if icon_size[0] != icon_size[1]:
errors.append(
ugettext(u'Icon must be square (same width and height).'))
if errors and is_preview and os.path.exists(loc):
# Delete the temporary preview file in case of error.
os.unlink(loc)
else:
errors.append(ugettext('There was an error uploading your preview.'))
if errors:
upload_hash = ''
return {'upload_hash': upload_hash, 'errors': errors}
@dev_required
def version_edit(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), pk=version_id)
static_theme = addon.type == amo.ADDON_STATICTHEME
version_form = forms.VersionForm(
request.POST or None,
request.FILES or None,
instance=version,
request=request,
) if not static_theme else None
data = {}
if version_form:
data['version_form'] = version_form
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
if not static_theme and addon.accepts_compatible_apps():
qs = version.apps.all().select_related('min', 'max')
compat_form = forms.CompatFormSet(
request.POST or None, queryset=qs,
form_kwargs={'version': version})
data['compat_form'] = compat_form
if (request.method == 'POST' and
all([form.is_valid() for form in data.values()])):
if 'compat_form' in data:
for compat in data['compat_form'].save(commit=False):
compat.version = version
compat.save()
for compat in data['compat_form'].deleted_objects:
compat.delete()
for form in data['compat_form'].forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
if 'version_form' in data:
            # VersionForm.save() clears the pending info request if the
            # developer specifically asked for it, but we've got additional
            # things to do here that depend on it.
had_pending_info_request = bool(addon.pending_info_request)
data['version_form'].save()
if 'approval_notes' in version_form.changed_data:
if had_pending_info_request:
log_and_notify(amo.LOG.APPROVAL_NOTES_CHANGED, None,
request.user, version)
else:
ActivityLog.create(amo.LOG.APPROVAL_NOTES_CHANGED,
addon, version, request.user)
if ('source' in version_form.changed_data and
version_form.cleaned_data['source']):
AddonReviewerFlags.objects.update_or_create(
addon=addon, defaults={'needs_admin_code_review': True})
commit_to_git = waffle.switch_is_active(
'enable-uploads-commit-to-git-storage')
if commit_to_git:
# Extract into git repository
extract_version_source_to_git.delay(
version_id=data['version_form'].instance.pk,
author_id=request.user.pk)
if had_pending_info_request:
log_and_notify(amo.LOG.SOURCE_CODE_UPLOADED, None,
request.user, version)
else:
ActivityLog.create(amo.LOG.SOURCE_CODE_UPLOADED,
addon, version, request.user)
messages.success(request, ugettext('Changes successfully saved.'))
return redirect('devhub.versions.edit', addon.slug, version_id)
data.update({
'addon': addon,
'version': version,
'is_admin': is_admin,
'choices': File.STATUS_CHOICES,
'files': version.files.all()})
return render(request, 'devhub/versions/edit.html', data)
def _log_max_version_change(addon, version, appversion):
details = {'version': version.version,
'target': appversion.version.version,
'application': appversion.application}
ActivityLog.create(amo.LOG.MAX_APPVERSION_UPDATED,
addon, version, details=details)
@dev_required
@post_required
@transaction.atomic
def version_delete(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(addon.versions.all(), pk=version_id)
if (addon.is_recommended and
version.recommendation_approved and
version == addon.current_version):
# Developers shouldn't be able to delete/disable the current version
# of an approved add-on.
msg = ugettext('The latest approved version of a Recommended extension'
' cannot be deleted or disabled. Please contact AMO '
'Admins if you need help with this.')
messages.error(request, msg)
elif 'disable_version' in request.POST:
messages.success(
request,
ugettext('Version %s disabled.') % version.version)
version.is_user_disabled = True # Will update the files/activity log.
version.addon.update_status()
else:
messages.success(
request,
ugettext('Version %s deleted.') % version.version)
version.delete() # Will also activity log.
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
@transaction.atomic
def version_reenable(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(addon.versions.all(), pk=version_id)
messages.success(
request,
ugettext('Version %s re-enabled.') % version.version)
version.is_user_disabled = False # Will update the files/activity log.
version.addon.update_status()
return redirect(addon.get_dev_url('versions'))
def check_validation_override(request, form, addon, version):
if version and form.cleaned_data.get('admin_override_validation'):
helper = ReviewHelper(request=request, addon=addon, version=version)
helper.set_data({
'operating_systems': '',
'applications': '',
'comments': ugettext(
u'This upload has failed validation, and may '
u'lack complete validation results. Please '
u'take due care when reviewing it.')})
helper.actions['super']['method']()
def auto_sign_file(file_):
"""If the file should be automatically reviewed and signed, do it."""
addon = file_.version.addon
if file_.is_experiment: # See bug 1220097.
ActivityLog.create(amo.LOG.EXPERIMENT_SIGNED, file_)
sign_file(file_)
elif file_.version.channel == amo.RELEASE_CHANNEL_UNLISTED:
# Sign automatically without manual review.
helper = ReviewHelper(request=None, addon=addon,
version=file_.version)
# Provide the file to review/sign to the helper.
helper.set_data({'addon_files': [file_],
'comments': 'automatic validation'})
helper.handler.process_public()
ActivityLog.create(amo.LOG.UNLISTED_SIGNED, file_)
def auto_sign_version(version, **kwargs):
# Sign all the unapproved files submitted, one for each platform.
for file_ in version.files.exclude(status=amo.STATUS_APPROVED):
auto_sign_file(file_, **kwargs)
@dev_required
def version_list(request, addon_id, addon):
qs = addon.versions.order_by('-created')
versions = amo_utils.paginate(request, qs)
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
token = request.COOKIES.get(API_TOKEN_COOKIE, None)
data = {'addon': addon,
'versions': versions,
'token': token,
'is_admin': is_admin}
return render(request, 'devhub/versions/list.html', data)
@dev_required
def version_bounce(request, addon_id, addon, version):
# Use filter since there could be dupes.
vs = addon.versions.filter(version=version).order_by('-created').first()
if vs:
return redirect('devhub.versions.edit', addon.slug, vs.id)
else:
raise http.Http404()
@json_view
@dev_required
def version_stats(request, addon_id, addon):
qs = addon.versions.all()
reviews = (qs.annotate(review_count=Count('ratings'))
.values('id', 'version', 'review_count'))
data = {v['id']: v for v in reviews}
files = (
qs.annotate(file_count=Count('files')).values_list('id', 'file_count'))
for id_, file_count in files:
# For backwards compatibility
data[id_]['files'] = file_count
data[id_]['reviews'] = data[id_].pop('review_count')
return data
@login_required
def submit_addon(request):
return render_agreement(
request=request,
template='devhub/addons/submit/start.html',
next_step='devhub.submit.distribution',
)
@dev_required
def submit_version_agreement(request, addon_id, addon):
return render_agreement(
request=request,
template='devhub/addons/submit/start.html',
next_step=reverse('devhub.submit.version', args=(addon.slug,)),
submit_page='version',
)
@transaction.atomic
def _submit_distribution(request, addon, next_view):
# Accept GET for the first load so we can preselect the channel.
form = forms.DistributionChoiceForm(
request.POST if request.method == 'POST' else
request.GET if request.GET.get('channel') else None)
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
args = [addon.slug] if addon else []
args.append(data['channel'])
return redirect(next_view, *args)
return render(request, 'devhub/addons/submit/distribute.html',
{'distribution_form': form,
'submit_notification_warning':
get_config('submit_notification_warning'),
'submit_page': 'version' if addon else 'addon'})
@login_required
def submit_addon_distribution(request):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.agreement')
return _submit_distribution(request, None, 'devhub.submit.upload')
@dev_required(submitting=True)
def submit_version_distribution(request, addon_id, addon):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
return _submit_distribution(request, addon, 'devhub.submit.version.upload')
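# Each entry below is (manifest field name, UI label, UI help text,
# default color string or False).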
WIZARD_COLOR_FIELDS = [
('frame',
_(u'Header area background'),
_(u'The color of the header area background, displayed in the part of '
u'the header not covered or visible through the header image. Manifest '
u'field: frame.'),
'rgba(229,230,232,1)'),
('tab_background_text',
_(u'Header area text and icons'),
_(u'The color of the text and icons in the header area, except the '
u'active tab. Manifest field: tab_background_text.'),
'rgba(0,0,0,1'),
('toolbar',
_(u'Toolbar area background'),
_(u'The background color for the navigation bar, the bookmarks bar, and '
u'the selected tab. Manifest field: toolbar.'),
False),
('bookmark_text',
_(u'Toolbar area text and icons'),
_(u'The color of the text and icons in the toolbar and the active tab. '
u'Manifest field: bookmark_text.'),
False),
('toolbar_field',
_(u'Toolbar field area background'),
_(u'The background color for fields in the toolbar, such as the URL bar. '
u'Manifest field: toolbar_field.'),
False),
('toolbar_field_text',
_(u'Toolbar field area text'),
_(u'The color of text in fields in the toolbar, such as the URL bar. '
u'Manifest field: toolbar_field_text.'),
False)
]
@transaction.atomic
def _submit_upload(request, addon, channel, next_view, wizard=False):
""" If this is a new addon upload `addon` will be None.
next_view is the view that will be redirected to.
"""
form = forms.NewUploadForm(
request.POST or None,
request.FILES or None,
addon=addon,
request=request
)
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
if addon:
version = Version.from_upload(
upload=data['upload'],
addon=addon,
selected_apps=data['compatible_apps'],
channel=channel,
parsed_data=data['parsed_data'])
url_args = [addon.slug, version.id]
else:
addon = Addon.from_upload(
upload=data['upload'],
channel=channel,
selected_apps=data['compatible_apps'],
parsed_data=data['parsed_data'],
user=request.user)
version = addon.find_latest_version(channel=channel)
url_args = [addon.slug]
check_validation_override(request, form, addon, version)
if (addon.status == amo.STATUS_NULL and
addon.has_complete_metadata() and
channel == amo.RELEASE_CHANNEL_LISTED):
addon.update(status=amo.STATUS_NOMINATED)
# auto-sign versions (the method checks eligibility)
auto_sign_version(version)
add_dynamic_theme_tag(version)
return redirect(next_view, *url_args)
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
if addon:
channel_choice_text = (forms.DistributionChoiceForm().LISTED_LABEL
if channel == amo.RELEASE_CHANNEL_LISTED else
forms.DistributionChoiceForm().UNLISTED_LABEL)
else:
channel_choice_text = '' # We only need this for Version upload.
submit_page = 'version' if addon else 'addon'
template = ('devhub/addons/submit/upload.html' if not wizard else
'devhub/addons/submit/wizard.html')
existing_properties = (
extract_theme_properties(addon, channel)
if wizard and addon else {})
unsupported_properties = (
wizard_unsupported_properties(
existing_properties,
[field for field, _, _, _ in WIZARD_COLOR_FIELDS])
if existing_properties else [])
return render(request, template,
{'new_addon_form': form,
'is_admin': is_admin,
'addon': addon,
'submit_notification_warning':
get_config('submit_notification_warning'),
'submit_page': submit_page,
'channel': channel,
'channel_choice_text': channel_choice_text,
'existing_properties': existing_properties,
'colors': WIZARD_COLOR_FIELDS,
'unsupported_properties': unsupported_properties,
'version_number':
get_next_version_number(addon) if wizard else None})
@login_required
def submit_addon_upload(request, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.agreement')
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, None, channel_id, 'devhub.submit.source')
@dev_required(submitting=True)
@no_admin_disabled
def submit_version_upload(request, addon_id, addon, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, addon, channel_id, 'devhub.submit.version.source')
@dev_required
@no_admin_disabled
def submit_version_auto(request, addon_id, addon):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
# choose the channel we need from the last upload
last_version = addon.find_latest_version(None, exclude=())
if not last_version:
return redirect('devhub.submit.version.distribution', addon.slug)
channel = last_version.channel
return _submit_upload(
request, addon, channel, 'devhub.submit.version.source')
@login_required
def submit_addon_theme_wizard(request, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.agreement')
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, None, channel_id, 'devhub.submit.source', wizard=True)
@dev_required
@no_admin_disabled
def submit_version_theme_wizard(request, addon_id, addon, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, addon, channel_id, 'devhub.submit.version.source',
wizard=True)
def _submit_source(request, addon, version, next_view):
redirect_args = [addon.slug, version.pk] if version else [addon.slug]
if addon.type != amo.ADDON_EXTENSION:
return redirect(next_view, *redirect_args)
latest_version = version or addon.find_latest_version(channel=None)
form = forms.SourceForm(
request.POST or None,
request.FILES or None,
instance=latest_version,
request=request)
if request.method == 'POST' and form.is_valid():
if form.cleaned_data.get('source'):
AddonReviewerFlags.objects.update_or_create(
addon=addon, defaults={'needs_admin_code_review': True})
activity_log = ActivityLog.objects.create(
action=amo.LOG.SOURCE_CODE_UPLOADED.id,
user=request.user,
details={
'comments': (u'This version has been automatically '
u'flagged for admin review, as it had source '
u'files attached when submitted.')})
VersionLog.objects.create(
version_id=latest_version.id, activity_log=activity_log)
form.save()
            # We can extract the actual source file only after the form
            # has been saved, because the file behind it may not have been
            # written to disk yet (e.g. for in-memory uploads).
if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
extract_version_source_to_git.delay(
version_id=form.instance.pk,
author_id=request.user.pk)
return redirect(next_view, *redirect_args)
context = {
'form': form,
'addon': addon,
'version': version,
'submit_page': 'version' if version else 'addon',
}
return render(request, 'devhub/addons/submit/source.html', context)
@dev_required(submitting=True)
def submit_addon_source(request, addon_id, addon):
return _submit_source(request, addon, None, 'devhub.submit.details')
@dev_required(submitting=True)
def submit_version_source(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), id=version_id)
return _submit_source(
request, addon, version, 'devhub.submit.version.details')
def _submit_details(request, addon, version):
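    # Details step of the submission flow. For version submissions this step
    # is skipped when the channel is unlisted or when a static theme already
    # has complete metadata.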
static_theme = addon.type == amo.ADDON_STATICTHEME
if version:
skip_details_step = (version.channel == amo.RELEASE_CHANNEL_UNLISTED or
(static_theme and addon.has_complete_metadata()))
if skip_details_step:
# Nothing to do here.
return redirect(
'devhub.submit.version.finish', addon.slug, version.pk)
latest_version = version
else:
# Figure out the latest version early in order to pass the same
# instance to each form that needs it (otherwise they might overwrite
# each other).
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if not latest_version:
            # No listed version? Then there is nothing to do in the listed
            # submission flow.
return redirect('devhub.submit.finish', addon.slug)
forms_list = []
context = {
'addon': addon,
'version': version,
'sources_provided': latest_version.sources_provided,
'submit_page': 'version' if version else 'addon',
}
post_data = request.POST if request.method == 'POST' else None
show_all_fields = not version or not addon.has_complete_metadata()
if show_all_fields:
if waffle.switch_is_active('content-optimization'):
describe_form = forms.DescribeFormContentOptimization(
post_data, instance=addon, request=request, version=version,
should_auto_crop=True)
else:
describe_form = forms.DescribeForm(
post_data, instance=addon, request=request, version=version)
cat_form_class = (forms.CategoryFormSet if not static_theme
else forms.SingleCategoryForm)
cat_form = cat_form_class(post_data, addon=addon, request=request)
policy_form = forms.PolicyForm(post_data, addon=addon)
license_form = forms.LicenseForm(
post_data, version=latest_version, prefix='license')
context.update(license_form.get_context())
context.update(
form=describe_form,
cat_form=cat_form,
policy_form=policy_form)
forms_list.extend([
describe_form,
cat_form,
policy_form,
context['license_form']
])
if not static_theme:
# Static themes don't need this form
reviewer_form = forms.VersionForm(
post_data, instance=latest_version, request=request)
context.update(reviewer_form=reviewer_form)
forms_list.append(reviewer_form)
if request.method == 'POST' and all(
form.is_valid() for form in forms_list):
if show_all_fields:
addon = describe_form.save()
cat_form.save()
policy_form.save()
license_form.save(log=False)
if not static_theme:
reviewer_form.save()
if addon.status == amo.STATUS_NULL:
addon.update(status=amo.STATUS_NOMINATED)
signals.submission_done.send(sender=addon)
elif not static_theme:
reviewer_form.save()
if not version:
return redirect('devhub.submit.finish', addon.slug)
else:
return redirect('devhub.submit.version.finish',
addon.slug, version.id)
template = 'devhub/addons/submit/%s' % (
'describe.html' if show_all_fields else 'describe_minimal.html')
return render(request, template, context)
@dev_required(submitting=True)
def submit_addon_details(request, addon_id, addon):
return _submit_details(request, addon, None)
@dev_required(submitting=True)
def submit_version_details(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), id=version_id)
return _submit_details(request, addon, version)
def _submit_finish(request, addon, version):
uploaded_version = version or addon.versions.latest()
try:
author = addon.authors.all()[0]
except IndexError:
# This should never happen.
author = None
if (not version and author and
uploaded_version.channel == amo.RELEASE_CHANNEL_LISTED and
not Version.objects.exclude(pk=uploaded_version.pk)
.filter(addon__authors=author,
channel=amo.RELEASE_CHANNEL_LISTED)
.exclude(addon__status=amo.STATUS_NULL)
.exists()):
        # If this is the first time this developer has submitted a listed
        # add-on (no other listed Version by this author exists), send them
        # a welcome email.
# We can use locale-prefixed URLs because the submitter probably
# speaks the same language by the time he/she reads the email.
context = {
'addon_name': str(addon.name),
'app': str(request.APP.pretty),
'detail_url': absolutify(addon.get_url_path()),
'version_url': absolutify(addon.get_dev_url('versions')),
'edit_url': absolutify(addon.get_dev_url('edit')),
}
tasks.send_welcome_email.delay(addon.id, [author.email], context)
submit_page = 'version' if version else 'addon'
return render(request, 'devhub/addons/submit/done.html',
{'addon': addon,
'uploaded_version': uploaded_version,
'submit_page': submit_page,
'preview': uploaded_version.previews.first()})
@dev_required(submitting=True)
def submit_addon_finish(request, addon_id, addon):
# Bounce to the details step if incomplete
if (not addon.has_complete_metadata() and
addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)):
return redirect('devhub.submit.details', addon.slug)
# Bounce to the versions page if they don't have any versions.
if not addon.versions.exists():
return redirect('devhub.submit.version', addon.slug)
return _submit_finish(request, addon, None)
@dev_required
def submit_version_finish(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), id=version_id)
return _submit_finish(request, addon, version)
@dev_required
@post_required
def remove_locale(request, addon_id, addon):
POST = request.POST
if 'locale' in POST and POST['locale'] != addon.default_locale:
addon.remove_locale(POST['locale'])
return http.HttpResponse()
return http.HttpResponseBadRequest()
@dev_required
@post_required
def request_review(request, addon_id, addon):
if not addon.can_request_review():
return http.HttpResponseBadRequest()
latest_version = addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED,
exclude=())
if latest_version:
for f in latest_version.files.filter(status=amo.STATUS_DISABLED):
f.update(status=amo.STATUS_AWAITING_REVIEW)
# Clear the nomination date so it gets set again in Addon.watch_status.
latest_version.update(nomination=None)
if addon.has_complete_metadata():
addon.update(status=amo.STATUS_NOMINATED)
messages.success(request, ugettext('Review requested.'))
else:
messages.success(request, _(
'You must provide further details to proceed.'))
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon, addon.status)
return redirect(addon.get_dev_url('versions'))
def docs(request, doc_name=None):
mdn_docs = {
None: '',
'getting-started': '',
'reference': '',
'how-to': '',
'how-to/getting-started': '',
'how-to/extension-development': '#Extensions',
'how-to/other-addons': '#Other_types_of_add-ons',
'how-to/thunderbird-mobile': '#Application-specific',
'how-to/theme-development': '#Themes',
'themes': '/Themes/Background',
'themes/faq': '/Themes/Background/FAQ',
'policies': '/AMO/Policy',
'policies/reviews': '/AMO/Policy/Reviews',
'policies/contact': '/AMO/Policy/Contact',
'policies/agreement': '/AMO/Policy/Agreement',
}
if doc_name in mdn_docs:
return redirect(MDN_BASE + mdn_docs[doc_name],
permanent=True)
raise http.Http404()
@login_required
def api_key_agreement(request):
return render_agreement(
request=request,
template='devhub/api/agreement.html',
next_step='devhub.api_key',
)
def render_agreement(request, template, next_step, **extra_context):
form = forms.AgreementForm(
request.POST if request.method == 'POST' else None,
request=request
)
if request.method == 'POST' and form.is_valid():
# Developer has validated the form: let's update its profile and
# redirect to next step. Note that the form is supposed to always be
# invalid if submission is not allowed for this request.
data = {
'read_dev_agreement': datetime.datetime.now(),
}
if 'display_name' in form.cleaned_data:
data['display_name'] = form.cleaned_data['display_name']
request.user.update(**data)
return redirect(next_step)
elif not UploadRestrictionChecker(request).is_submission_allowed():
        # The developer has either posted an invalid form or just landed on
        # the page but hasn't read the agreement yet, or isn't allowed to
        # submit for some other reason (denied ip/email): show the form
        # (with potential errors highlighted).
context = {
'agreement_form': form,
'agreement_message': str(
DeveloperAgreementRestriction.error_message
),
}
context.update(extra_context)
return render(request, template, context)
else:
# The developer has already read the agreement, we should just redirect
# to the next step.
response = redirect(next_step)
return response
@login_required
@transaction.atomic
def api_key(request):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect(reverse('devhub.api_key_agreement'))
try:
credentials = APIKey.get_jwt_key(user=request.user)
except APIKey.DoesNotExist:
credentials = None
try:
confirmation = APIKeyConfirmation.objects.get(
user=request.user)
except APIKeyConfirmation.DoesNotExist:
confirmation = None
if request.method == 'POST':
has_confirmed_or_is_confirming = confirmation and (
confirmation.confirmed_once or confirmation.is_token_valid(
request.POST.get('confirmation_token'))
)
# Revoking credentials happens regardless of action, if there were
# credentials in the first place.
if (credentials and
request.POST.get('action') in ('revoke', 'generate')):
credentials.update(is_active=None)
log.info('revoking JWT key for user: {}, {}'
.format(request.user.id, credentials))
send_key_revoked_email(request.user.email, credentials.key)
msg = ugettext(
'Your old credentials were revoked and are no longer valid.')
messages.success(request, msg)
# If trying to generate with no confirmation instance, we don't
# generate the keys immediately but instead send you an email to
# confirm the generation of the key. This should only happen once per
# user, unless the instance is deleted by admins to reset the process
# for that user.
if confirmation is None and request.POST.get('action') == 'generate':
confirmation = APIKeyConfirmation.objects.create(
user=request.user, token=APIKeyConfirmation.generate_token())
confirmation.send_confirmation_email()
# If you have a confirmation instance, you need to either have it
# confirmed once already or have the valid token proving you received
# the email.
elif (has_confirmed_or_is_confirming and
request.POST.get('action') == 'generate'):
confirmation.update(confirmed_once=True)
new_credentials = APIKey.new_jwt_credentials(request.user)
log.info('new JWT key created: {}'.format(new_credentials))
send_key_change_email(request.user.email, new_credentials.key)
else:
# If we land here, either confirmation token is invalid, or action
# is invalid, or state is outdated (like user trying to revoke but
# there are already no credentials).
# We can just pass and let the redirect happen.
pass
# In any case, redirect after POST.
return redirect(reverse('devhub.api_key'))
context_data = {
'title': ugettext('Manage API Keys'),
'credentials': credentials,
'confirmation': confirmation,
'token': request.GET.get('token') # For confirmation step.
}
return render(request, 'devhub/api/key.html', context_data)
def send_key_change_email(to_email, key):
template = loader.get_template('devhub/email/new-key-email.ltxt')
url = absolutify(reverse('devhub.api_key'))
send_mail(
ugettext('New API key created'),
template.render({'key': key, 'url': url}),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[to_email],
)
def send_key_revoked_email(to_email, key):
template = loader.get_template('devhub/email/revoked-key-email.ltxt')
url = absolutify(reverse('devhub.api_key'))
send_mail(
ugettext('API key revoked'),
template.render({'key': key, 'url': url}),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[to_email],
)
@dev_required
@json_view
def theme_background_image(request, addon_id, addon, channel):
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
version = addon.find_latest_version(channel_id)
return (version.get_background_images_encoded(header_only=True) if version
else {})
def _clean_next_url(request):
gets = request.GET.copy()
url = gets.get('to', settings.LOGIN_REDIRECT_URL)
if not is_safe_url(url, allowed_hosts=(settings.DOMAIN,)):
log.info(u'Unsafe redirect to %s' % url)
url = settings.LOGIN_REDIRECT_URL
domain = gets.get('domain', None)
if domain in settings.VALID_LOGIN_REDIRECTS.keys():
url = settings.VALID_LOGIN_REDIRECTS[domain] + url
gets['to'] = url
request.GET = gets
return request
def logout(request):
user = request.user
if not user.is_anonymous:
log.debug(u"User (%s) logged out" % user)
if 'to' in request.GET:
request = _clean_next_url(request)
next_url = request.GET.get('to')
if not next_url:
next_url = settings.LOGOUT_REDIRECT_URL
prefixer = get_url_prefix()
if prefixer:
next_url = prefixer.fix(next_url)
response = http.HttpResponseRedirect(next_url)
logout_user(request, response)
return response
|
the-stack_0_11619 | """LSTM Controller."""
import torch
from torch import nn
from torch.nn import Parameter
import numpy as np
# torch.set_default_tensor_type('torch.cuda.FloatTensor')
class LSTMController(nn.Module):
"""An NTM controller based on LSTM."""
def __init__(self, num_inputs, num_outputs, num_layers):
super(LSTMController, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size=num_inputs,
hidden_size=num_outputs,
num_layers=num_layers)
# The hidden state is a learned parameter
self.lstm_h_bias = Parameter(torch.randn(self.num_layers, 1, self.num_outputs) * 0.05)
self.lstm_c_bias = Parameter(torch.randn(self.num_layers, 1, self.num_outputs) * 0.05)
self.reset_parameters()
def create_new_state(self, batch_size):
# Dimension: (num_layers * num_directions, batch, hidden_size)
lstm_h = self.lstm_h_bias.clone().repeat(1, batch_size, 1)
lstm_c = self.lstm_c_bias.clone().repeat(1, batch_size, 1)
return lstm_h, lstm_c
def reset_parameters(self):
for p in self.lstm.parameters():
if p.dim() == 1:
nn.init.constant_(p, 0)
else:
stdev = 5 / (np.sqrt(self.num_inputs + self.num_outputs))
nn.init.uniform_(p, -stdev, stdev)
def size(self):
return self.num_inputs, self.num_outputs
def forward(self, x, prev_state):
x = x.unsqueeze(0)
outp, state = self.lstm(x, prev_state)
return outp.squeeze(0), state
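# Hedged usage sketch (illustrative only, not part of the original module);
# the sizes below are arbitrary examples:
#
#   controller = LSTMController(num_inputs=10, num_outputs=20, num_layers=1)
#   state = controller.create_new_state(batch_size=4)
#   x = torch.randn(4, 10)             # one time step, shape (batch, num_inputs)
#   out, state = controller(x, state)  # out has shape (batch, num_outputs)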
|
the-stack_0_11620 | import requests
import os
import urllib.request
import zipfile
def unzipper(file_path, dirname):
with zipfile.ZipFile(file_path) as zf:
files = zf.namelist()
zf.extractall(dirname)
def download_http(url, file_path):
r = requests.get(url)
with open(file_path, "wb") as f:
f.write(r.content)
def download(url, file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
print("Starting request for file: " + file_path)
# start http request
if url.startswith('http://'):
return download_http(url, file_path)
    # download file from ftp
    elif url.startswith('ftp://'):
        # The original code called an undefined `self.download_ftp` helper here;
        # urllib.request.urlretrieve also handles ftp:// URLs, so it is used as
        # a stand-in.
        return urllib.request.urlretrieve(url, file_path)
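# Hedged usage sketch (illustrative only; the URL and paths are made up):
#
#   download('http://example.com/archive.zip', 'downloads/archive.zip')
#   unzipper('downloads/archive.zip', 'downloads/archive')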
|
the-stack_0_11624 | '''
OpenIMU SPI package, version 0.2.0.
- pip install spidev 3.4
- Reads packets through the SPI interface; OpenIMU330BI tested on a Pi3 board
  (Raspbian OS, Raspberry Pi 3B+).
- SPI slave: OpenIMU 330 EVK
- Pin connections:
    Pi3           330/300 EVK
    miso    <==>  miso
    mosi    <==>  mosi
    sck     <==>  sck
    gpio(bcm4)  <==>  cs    (black line)
    gpio(bcm17) <==>  drdy  (red line)
    gnd     <==>  gnd
    gpio(bcm27) <==>  nRST
@cek from Aceinna 2019.11.4
'''
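# Quick-start sketch (illustrative only; pin numbers follow the wiring above,
# and 0x56 is the PRODUCT_ID register listed in the __main__ demo below):
#
#   imu = SpiOpenIMU(target_module="330BI", cs_pin=4, interrupt_pin=17,
#                    drdy_status=True)
#   product_id = imu.single_read(0x56)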
import os
import sys
import spidev
import time, math
import struct
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error import RPi.GPIO!")
import traceback
from gpio import *
class SpiOpenIMU:
def __init__(self, target_module = "300", fw='0.0', cs_pin = 4, interrupt_pin = 17, drdy_status = False):
'''
        Pin numbers use the BCM numbering scheme.
'''
self.spi = spidev.SpiDev()
self.cs_channel = cs_pin
self.interrupt_channel = interrupt_pin
self.drdy = drdy_status
        self.speed = 1000000  # 1 MHz
        self.delay = 0  # ns
        self.word = 8  # the hardware limits the word size to 8 bits
        self.fw_version = fw
        self.power = aceinna_gpio(use_gpio=True)  # BCM GPIO used to reset/power-cycle the EVK
self.gpio_setting()
self.spidev_setting()
self.check_settings()
time.sleep(0.1)
self.module = target_module
print("initialize based on: %s, with DRDY_usage: %s" % (self.module, self.drdy))
def gpio_setting(self):
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.cs_channel,GPIO.OUT)
        GPIO.output(self.cs_channel,GPIO.HIGH)  # used as the CS line, replacing the default CS0 on the Pi3 board
if self.drdy:
GPIO.setup(self.interrupt_channel,GPIO.IN) # channel used as IMU data ready detection
time.sleep(0.4)
GPIO.add_event_detect(self.interrupt_channel,GPIO.FALLING)
return True
def single_read(self, target_register):
# if self.drdy and self.module != "300":
# while not GPIO.event_detected(self.interrupt_channel):
# pass
if self.module == "381":
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
            self.spi.xfer2([target_register,0x00],self.speed,self.speed)  # send the register address; the data clocked back here is discarded
GPIO.output(self.cs_channel,GPIO.HIGH)
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
resp_single = self.spi.xfer2([0x00,0x00],self.speed,self.speed) #receive the back target data
GPIO.output(self.cs_channel,GPIO.HIGH)
return self.combine_reg('>h', resp_single[0],resp_single[1])
else:
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
resp_single = self.spi.xfer2([target_register,0x00,0x00,0x00],self.speed,self.speed)
GPIO.output(self.cs_channel,GPIO.HIGH)
print("SPI raw read:", hex(resp_single[2]), hex(resp_single[3]))
return self.combine_reg('>h', resp_single[2],resp_single[3])
def single_write(self, target_register, target_data):
# if self.drdy and self.module != "300":
# while not GPIO.event_detected(self.interrupt_channel):
# pass
GPIO.output(self.cs_channel,GPIO.LOW)
temp_reg = target_register | 0x80
        self.spi.xfer2([temp_reg, target_data],self.speed,self.speed)  # write data: e.g. 0xF0 0x10 writes 0x10 to register 0x70 (0x80 is the write bit)
GPIO.output(self.cs_channel,GPIO.HIGH)
return True
def burst_read(self, first_register, subregister_num, sratm_fac):
'''
sratm_fac={"rate":[0.005, 0], "accel":[0.25, 0]}
status, rate, accel, temp, mag factors dict
'''
sts, rate, acc, deg, tmstp, temp, mag = [], [], [], [], [], [], []
# if self.drdy and self.module != "300": # 300 no drdy now, so only not 300 will go next
# while not GPIO.event_detected(self.interrupt_channel):
# pass
while (not GPIO.event_detected(self.interrupt_channel)) and self.drdy:
pass
if "381" in self.module:
GPIO.output(self.cs_channel,GPIO.LOW)
resp = self.spi.xfer2([first_register,0x00],self.speed,self.speed)
GPIO.output(self.cs_channel,GPIO.HIGH)
for i_381 in range(subregister_num):
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
resp += self.spi.xfer2([0x00,0x00],self.speed,self.speed)[:]
GPIO.output(self.cs_channel,GPIO.HIGH)
#unit:degree per second
rate.append(self.combine_reg('>h', resp[4],resp[5]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[6],resp[7]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[8],resp[9]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
#unit:mg
acc.append(self.combine_reg('>h', resp[10],resp[11]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[12],resp[13]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[14],resp[15]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
else: #300,330 is here
GPIO.output(self.cs_channel,GPIO.LOW)
            # xfer2([values], speed_hz, delay_usec): bi-directional SPI data transfer.
            # Defaults to 8-bit mode; a speed_hz of zero means the maximum supported SPI clock.
            # delay_usec is the CS hold delay.
first_register_send = [first_register,0x00]
if '330BA' in self.module and first_register == 0x3D:
subregister_num += 6
for i_else in range(2*subregister_num):
first_register_send.append(0x00)
resp = self.spi.xfer2(first_register_send,self.speed,2*self.delay)
GPIO.output(self.cs_channel,GPIO.HIGH)
sts.append(self.combine_reg('>H', resp[2], resp[3]) * (sratm_fac.get("status")[0]) + (sratm_fac.get("status")[1]))
#unit:degree per second
if '330BA' in self.module and first_register == 0x3D:
rate.append(self.combine_reg('>i', resp[4],resp[5],resp[6],resp[7]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>i', resp[8],resp[9], resp[10],resp[11]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>i', resp[12],resp[13], resp[14],resp[15]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
else:
rate.append(self.combine_reg('>h', resp[4],resp[5]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[6],resp[7]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[8],resp[9]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
#unit:g
if '330BA' in self.module and first_register == 0x3D:
acc.append(self.combine_reg('>i', resp[16],resp[17], resp[18],resp[19]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>i', resp[20],resp[21], resp[22],resp[23]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>i', resp[24],resp[25], resp[26],resp[27]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
else:
acc.append(self.combine_reg('>h', resp[10],resp[11]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[12],resp[13]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[14],resp[15]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
#unit:deg
if '330BI' in self.module and first_register == 0x3F:
deg.append(self.combine_reg('>h', resp[18],resp[19]) * 360/65536)
deg.append(self.combine_reg('>h', resp[20],resp[21]) * 360/65536)
deg.append(self.combine_reg('>h', resp[22],resp[23]) * 360/65536)
# return rate, acc, deg
if '330BA' in self.module and first_register == 0x3D:
temp.append(self.combine_reg('>h', resp[28],resp[29]) * (sratm_fac.get("temp")[0]) + (sratm_fac.get("temp")[1]))
else:
temp.append(self.combine_reg('>h', resp[16],resp[17]) * (sratm_fac.get("temp")[0]) + (sratm_fac.get("temp")[1]))
if ("330BA" in self.module or '331BI' in self.module) and (first_register == 0x3F or first_register == 0x3D):
if first_register == 0x3F:
tmstp.append(self.combine_reg('>H', resp[18],resp[19]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
tmstp.append(self.combine_reg('>H', resp[20],resp[21]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
else:
tmstp.append(self.combine_reg('>H', resp[30],resp[31]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
tmstp.append(self.combine_reg('>H', resp[32],resp[33]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
# return rate, acc, tmstp
if '300ZI' in self.module and first_register == 0x3F:
mag.append(self.combine_reg('>h', resp[18],resp[19]) * (sratm_fac.get("mag")[0]) + (sratm_fac.get("mag")[1]))
mag.append(self.combine_reg('>h', resp[20],resp[21]) * (sratm_fac.get("mag")[0]) + (sratm_fac.get("mag")[1]))
mag.append(self.combine_reg('>h', resp[22],resp[23]) * (sratm_fac.get("mag")[0]) + (sratm_fac.get("mag")[1]))
if '300ZI' in self.module and first_register == 0x3D:
# deg.append(self.combine_reg('>h', resp[18],resp[19]) * 57.3 * (2*math.pi)/65536) #65536/(2*math.pi)=10430.378350470453 65536/360=0.0054931640625
# deg.append(self.combine_reg('>h', resp[20],resp[21]) * 57.3 * (2*math.pi)/65536)
# deg.append(self.combine_reg('>h', resp[22],resp[23]) * 57.3 * (2*math.pi)/65536)
deg.append(self.combine_reg('>h', resp[18],resp[19]) * (sratm_fac.get("vg_angle")[0]) + (sratm_fac.get("vg_angle")[1]))
deg.append(self.combine_reg('>h', resp[20],resp[21]) * (sratm_fac.get("vg_angle")[0]) + (sratm_fac.get("vg_angle")[1]))
deg.append(self.combine_reg('>h', resp[22],resp[23]) * (sratm_fac.get("vg_angle")[0]) + (sratm_fac.get("vg_angle")[1]))
return sts, rate, acc, temp, mag, deg, tmstp
def spidev_setting(self):
        bus = 0  # supported values: 0, 1
        device = 1  # supported values: 0, 1; default: 0
self.spi.open(bus,device) #connect to the device. /dev/spidev<bus>.<device>
        self.spi.bits_per_word = self.word  # defaults to 8 on the system
self.spi.max_speed_hz = self.speed
self.spi.mode = 0b11
#spi.bits_per_word = 0
#spi.cshigh #default CS0 in pi3 board
#spi.lsbfirst = False
#spi.threewire = 0
return True
def check_settings(self):
print(self.spi.mode)
print(self.spi.threewire)
print(self.spi.cshigh)
print(self.spi.bits_per_word)
print(self.spi.lsbfirst)
return True
def combine_reg(self,fmt='>h',*msb_lsb):
temp_bytes = b''
for i in msb_lsb:
temp_bytes += struct.pack('B',i)
        return struct.unpack(fmt,temp_bytes)[0]  # MSB first
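    # Illustrative example (not in the original): combine_reg('>h', 0xFF, 0xFE)
    # packs the bytes as b'\xff\xfe' and unpacks them as a signed big-endian
    # 16-bit integer, returning -2.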
def power_reset(self, delay=2):
'''
        Power-cycle the EVK. Special case for IMU331: wait at least 1.25 s.
'''
self.power.power_off()
time.sleep(delay)
self.power.power_on()
time.sleep(delay)
def __del__(self):
GPIO.cleanup()
self.spi.close()
if __name__ == "__main__":
    openimu_spi = SpiOpenIMU(target_module="330BI",drdy_status=True, fw='1.2.1')  # set the module name and drdy status (enable or not) ----------------- step: 1
burst_read, single_read, single_write = True, True, False # set the read style, burst or single------------step:2
f = open("data_" + str(openimu_spi.module) + ".txt", "w")
str_config = "module style:{0}; drdy:{1}; burst read:{2}; single read:{3} \n".format(openimu_spi.module, openimu_spi.drdy, burst_read, single_read)
print(str_config)
f.write(str_config)
input("Power on IMU !!!!!!!!")
time.sleep(2)
try:
if openimu_spi.drdy == False: # when no drdy, default SPI ODR is 100HZ
time.sleep(0.01)
# for i_wd in range(9,12):
# for i_wd in [0x00, 0x03,0x04,0x05,0x06,0x30,0x40,0x50,0x60,0x0B, 0x0B]:
# ori_list = [0x0000, 0x0009, 0x0023, 0x002A, 0x0041, 0x0048, 0x0062, 0x006B, 0x0085, 0x008C, 0x0092, 0x009B, 0x00C4, 0x00CD, 0x00D3,
# 0x00DA, 0x0111, 0x0118, 0x0124, 0x012D, 0x0150, 0x0159, 0x0165, 0x016C
# ]
# ori_list = [0x0009, 0x016C]
# for i_wd in ori_list:
if single_read:
read_name = [
"X_Rate", "Y_Rate", "Z_Rate", "X_Accel", "Y_Accel", "Z_Accel","X_Mag", "Y_Mag", "Z_Mag", "BOARD_TEMP", "RATE_TEMP", "DRDY_RATE", "ACCEL_LPF", "ACCEL_SCALE_FACTOR", "RATE_SCALE_FACTOR",
"SN_1", "SN_2", "SN_3", "PRODUCT_ID", "MASTER_STATUS", "HW_STATUS", "SW_STATUS", "ACCEL_RANGE/RATE_RANGE",
"ORIENTATION_MSB/ORIENTATION_LSB", "SAVE_CONFIG", "RATE_LPF", "HW_VERSION/SW_VERSION"
]
read_reg = [
0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x37, 0x38, 0x46, 0x47,
0x52, 0x54, 0x58, 0x56, 0x5A, 0x5C, 0x5E, 0x70, 0x74, 0x76, 0x78, 0x7E
]
# read_name = ["ORIENTATION_MSB"]
# read_reg = [0x74]
for i in zip(read_name, read_reg):
read_value = openimu_spi.single_read(i[1])
hex_value = hex(read_value)
prt_list = [i[0], hex(i[1]), hex_value]
print(prt_list)
if 'Rate' in i[0]:
read_value /= 64
elif 'Accel' in i[0]:
read_value /= 4000
elif 'Mag' in i[0]:
read_value /= 16354
elif 'TEMP' in i[0]:
read_value = read_value*0.073111172849435 + 31.0
str_temp = "{0:_<40s}0x{1:<5X} read value: 0d {2:<10} hex value: {3:<10s}\n".format(i[0], i[1], read_value, hex_value)
print(str_temp)
f.write(str(prt_list) + '\n' + str_temp)
if single_write:
while input('need write? y/n?') != 'y':
pass
write_name = ["packet rate", "Accel LPF", "orimsb", "orilsb", "Rate LPF", "save config"]
write_reg = [0x37, 0x38, 0x74, 0x75, 0x78, 0x76]
write_data = [0x01, 0x40, 0x00, 0x6B, 0x40, 0x00]
# write_name = ["ORIENTATION_LSB"]
# write_reg = [0x75]
# write_data = [0x02]
# write_data = [i_wd, i_wd]
# write_data = [i_wd & 0xFF]
for j in zip(write_name, write_reg, write_data): #start to write registers
print("write_name:{0:<40s}, write address:0x{1:<5X}, wirte data:0x{2:<5X}".format(j[0], j[1], j[2]))
openimu_spi.single_write(j[1], j[2])
time.sleep(0.5)
# if single_read or single_write:
# break
while input('need burst read? y/n?') != 'y':
pass
        while burst_read:  # not setting the ODR; with burst read, the output rate follows the DRDY frequency
if ('330BI' in openimu_spi.module) or ('330BA' in openimu_spi.module):
# list_rate, list_acc = openimu_spi.burst_read(first_register=0x3E,subregister_num=8) #input the read register and numbers of subregisters want to read together
# str_burst = "time:{0:>10f}; gyro:{1:>25s}; accel:{2:>25s} \n".format(
# time.clock(), ", ".join([str(x) for x in list_rate]), ", ".join([str(x) for x in list_acc])
# )
list_sts, list_rate, list_acc, list_temp, list_mag, list_deg, tmstamp = openimu_spi.burst_read(first_register=0x3E,subregister_num=8) #input the read register and numbers of subregisters want to read together
str_burst = "time:{0:>10f}; gyro:{1:>50s}; accel:{2:>50s}; timestamp:{3:>25s} \n".format(
time.clock(), ", ".join([str(x) for x in list_rate]), ", ".join([str(x) for x in list_acc]), ", ".join([str(x) for x in tmstamp])
)
else:
list_sts, list_rate, list_acc, list_temp, list_mag, list_deg, tmstamp= openimu_spi.burst_read(first_register=0x3D,subregister_num=11)
str_burst = "time:{0:>20f}; status:{3:>20s} ; gyro:{1:>50s}; accel:{2:>40s}; temp:{4:>10s}; mag:{5:>20s}; deg:{6:>20s}\n".format(
time.clock(), ", ".join([str(x) for x in list_rate]), ", ".join([str(x) for x in list_acc]), ", ".join([str(x) for x in list_sts]),
", ".join([str(x) for x in list_temp]), ", ".join([str(x) for x in list_mag]), ", ".join([str(x) for x in list_deg])
)
print(str_burst)
f.write(str_burst)
# input('next cycle')
except KeyboardInterrupt:
f.close()
print("stoped by customer!")
# except Exception as e:
# print(e)
# traceback.print_exc()
# polling mode reading spi interface, with drdy pin detection
# try:
# while True:
# # GPIO.output(cs_channel,GPIO.LOW)
# # product_id = spi.xfer2([0x56,0x00,0x00,0x00],0,10)
# # GPIO.output(cs_channel,GPIO.HIGH)
# # print('id',product_id)
# time.sleep(0.1)
# # if GPIO.event_detected(interrupt_channel):
# if True:
# time.sleep(0.5)
# GPIO.output(cs_channel,GPIO.LOW)
# # xfer2([value],speed_hz,delay_usec_cs), SPI bi-direction data transfer.
    #           # default 8 bits mode, if speed_hz set to zero means the maximum supported SPI clock.
# # delay_usec_cs is the cs hold delay
# resp = spi.xfer2([openimu_spi.burst_cmd_std,0x00,0x00,0x00,0x00,0x00,0x00,
# 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],0,10)
# GPIO.output(cs_channel,GPIO.HIGH)
# #unit:degree per second
# x_rate = openimu_spi.combine_reg(resp[4],resp[5])/200
# y_rate = openimu_spi.combine_reg(resp[6],resp[7])/200
    #           z_rate = openimu_spi.combine_reg(resp[8],resp[9])/200
# #unit:mg
# x_acc = openimu_spi.combine_reg(resp[10],resp[11])/4
# y_acc = openimu_spi.combine_reg(resp[12],resp[13])/4
# z_acc = openimu_spi.combine_reg(resp[14],resp[15])/4
# print('g/a',x_rate,y_rate,z_rate,x_acc,y_acc,z_acc)
# #write to register
# time.sleep(0.5)
# GPIO.output(cs_channel,GPIO.LOW)
# resp1 = spi.xfer2([0x80|0x50,0x23],0,10)
# time.sleep(0.5)
# GPIO.output(cs_channel,GPIO.HIGH)
# 0x56 OPEN300 ID: 0x30(48) 0x00(0)
    # 0x56 OPEN330 ID: 0x33(51) 0x00(0)
# 0x56 IMU381 ID: 0X38(56) 0x10(16)
|
the-stack_0_11625 | import struct
from django.forms import ValidationError
from .const import (
BANDTYPE_FLAG_HASNODATA, GDAL_TO_POSTGIS, GDAL_TO_STRUCT,
POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL, STRUCT_SIZE,
)
def pack(structure, data):
"""
    Pack data into a little-endian binary (bytes) string.
"""
return struct.pack('<' + structure, *data)
def unpack(structure, data):
"""
    Unpack a little-endian hex-encoded binary string into a tuple.
"""
return struct.unpack('<' + structure, bytes.fromhex(data))
def chunk(data, index):
"""
Split a string into two parts at the input index.
"""
return data[:index], data[index:]
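# Illustrative behaviour of the helpers above (values assumed):
#   pack('B', [7])      -> b'\x07'
#   unpack('B', '47')   -> (71,)           # 0x47 == 71
#   chunk('abcdef', 2)  -> ('ab', 'cdef')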
def from_pgraster(data):
"""
Convert a PostGIS HEX String into a dictionary.
"""
if data is None:
return
# Split raster header from data
header, data = chunk(data, 122)
header = unpack(POSTGIS_HEADER_STRUCTURE, header)
# Parse band data
bands = []
pixeltypes = []
while data:
# Get pixel type for this band
pixeltype, data = chunk(data, 2)
pixeltype = unpack('B', pixeltype)[0]
# Remove nodata byte from band nodata value if it exists.
has_nodata = pixeltype & BANDTYPE_FLAG_HASNODATA
if has_nodata:
pixeltype &= ~BANDTYPE_FLAG_HASNODATA
# Convert datatype from PostGIS to GDAL & get pack type and size
pixeltype = POSTGIS_TO_GDAL[pixeltype]
pack_type = GDAL_TO_STRUCT[pixeltype]
pack_size = 2 * STRUCT_SIZE[pack_type]
# Parse band nodata value. The nodata value is part of the
# PGRaster string even if the nodata flag is True, so it always
# has to be chunked off the data string.
nodata, data = chunk(data, pack_size)
nodata = unpack(pack_type, nodata)[0]
# Chunk and unpack band data (pack size times nr of pixels)
band, data = chunk(data, pack_size * header[10] * header[11])
band_result = {'data': bytes.fromhex(band)}
# If the nodata flag is True, set the nodata value.
if has_nodata:
band_result['nodata_value'] = nodata
# Append band data to band list
bands.append(band_result)
# Store pixeltype of this band in pixeltypes array
pixeltypes.append(pixeltype)
# Check that all bands have the same pixeltype.
# This is required by GDAL. PostGIS rasters could have different pixeltypes
# for bands of the same raster.
if len(set(pixeltypes)) != 1:
raise ValidationError("Band pixeltypes are not all equal.")
return {
'srid': int(header[9]),
'width': header[10], 'height': header[11],
'datatype': pixeltypes[0],
'origin': (header[5], header[6]),
'scale': (header[3], header[4]),
'skew': (header[7], header[8]),
'bands': bands,
}
def to_pgraster(rast):
"""
Convert a GDALRaster into PostGIS Raster format.
"""
# Prepare the raster header data as a tuple. The first two numbers are
# the endianness and the PostGIS Raster Version, both are fixed by
# PostGIS at the moment.
rasterheader = (
1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
rast.srs.srid, rast.width, rast.height,
)
# Pack raster header.
result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
for band in rast.bands:
# The PostGIS raster band header has exactly two elements, a 8BUI byte
# and the nodata value.
#
# The 8BUI stores both the PostGIS pixel data type and a nodata flag.
# It is composed as the datatype with BANDTYPE_FLAG_HASNODATA (1 << 6)
# for existing nodata values:
# 8BUI_VALUE = PG_PIXEL_TYPE (0-11) | BANDTYPE_FLAG_HASNODATA
#
# For example, if the byte value is 71, then the datatype is
# 71 & ~BANDTYPE_FLAG_HASNODATA = 7 (32BSI)
# and the nodata value is True.
structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
# Get band pixel type in PostGIS notation
pixeltype = GDAL_TO_POSTGIS[band.datatype()]
# Set the nodata flag
if band.nodata_value is not None:
pixeltype |= BANDTYPE_FLAG_HASNODATA
# Pack band header
bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
# Add packed header and band data to result
result += bandheader + band.data(as_memoryview=True)
# Convert raster to hex string before passing it to the DB.
return result.hex()
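# For reference, the 12 header fields packed/unpacked above appear in this order:
#   endianness, version, nbands, scale_x, scale_y, origin_x, origin_y,
#   skew_x, skew_y, srid, width, height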
|
the-stack_0_11626 | # -*- coding: utf-8 -*-
from django.db import migrations
import organizations.fields
class Migration(migrations.Migration):
dependencies = [("organizations", "0001_initial")]
operations = [
migrations.AlterField(
model_name="organization",
name="slug",
field=organizations.fields.SlugField(
blank=True,
editable=False,
help_text="The name in all lowercase, suitable for URL identification",
max_length=200,
populate_from=("name",),
unique=True,
),
)
]
|
the-stack_0_11627 | # (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""HPE Container Platform CLI."""
from __future__ import print_function
import sys
from hpecp.k8s_worker import WorkerK8sStatus, WorkerK8s
from hpecp.cli import base
from textwrap import dedent
class K8sWorkerProxy(base.BaseProxy):
"""Proxy object to :py:attr:`<hpecp.client.k8s_worker>`."""
def __dir__(self):
"""Return the CLI method names."""
return [
"create_with_ssh_key",
"delete",
"examples",
"get",
"list",
"set_storage",
"statuses",
"wait_for_status",
]
def __init__(self):
"""Create instance of proxy class with the client module name."""
super(K8sWorkerProxy, self).new_instance("k8s_worker", WorkerK8s)
@base.intercept_exception
def create_with_ssh_key(
self,
ip=None,
ssh_key=None,
ssh_key_file=None,
ssh_passphrase=None,
tags=None,
ephemeral_disks=None,
persistent_disks=None,
wait_for_operation_secs=0,
):
"""Create a K8s Worker using SSH key authentication.
Parameters
----------
ip : str, optional
The IP address of the host, this is used for internal
communication, by default None.
ssh_key : str, optional
The SSH key data as a string, instead of this location to a key
file may also be provided, by default None.
ssh_key_file : str, optional
The SSH key file path, by default None
ssh_passphrase: str, optional
The SSH passphrase
tags : list, optional
Tags to use, e.g. /api/v2/tag/1:foo,/api/v2/tag/1:bar,
by default None
ephemeral_disks : str
Comma separated string containing ephemeral disks.
e.g: "/dev/nvme2n1,/dev/nvme2n2"
persistent_disks : str, optional
Comma separated string containing persistent disks, by default
None.
e.g: "/dev/nvme1n1,/dev/nvme1n2"
wait_for_operation_secs: int
wait for operations to complete. 0 = don't wait
"""
if ssh_key is None and ssh_key_file is None:
print(
"At least one of ssh_key or ssh_key_file must be provided",
file=sys.stderr,
)
sys.exit(1)
if ssh_key is not None and ssh_key_file is not None:
print(
(
"Either ssh_key or ssh_key_file must be provided,"
" but not both."
),
file=sys.stderr,
)
sys.exit(1)
if ssh_key_file:
try:
with open(ssh_key_file) as f:
ssh_key = f.read()
except OSError:
print(
"Could not open/read ssh-key-file: {}".format(
ssh_key_file
),
file=sys.stderr,
)
sys.exit(1)
if (
ephemeral_disks is not None or persistent_disks is not None
) and wait_for_operation_secs == 0:
print(
(
"If setting disks, 'wait-for-operation-secs' parameter"
" must be greater than zero (recommended 600 seconds)"
),
file=sys.stderr,
)
sys.exit(1)
tags_parsed = []
if tags is not None:
for tag in tags.split(","):
k, v = tag.split(":")
tags_parsed.append({"tag_id": k, "tag_value": v})
worker_id = base.get_client().k8s_worker.create_with_ssh_key(
ip=ip,
ssh_key_data=ssh_key,
ssh_passphrase=ssh_passphrase,
tags=tags_parsed,
)
if wait_for_operation_secs > 0:
self.wait_for_status(
id=worker_id,
status=["storage_pending", "error"],
timeout_secs=wait_for_operation_secs,
)
if base.get_client().k8s_worker.get(id=worker_id).status == "error":
print(
(
"Create request has errored. "
"Check status message with `hpecp k8sworker get {}".format(
id
)
),
file=sys.stderr,
)
sys.exit(1)
if ephemeral_disks is not None or persistent_disks is not None:
self.set_storage(
id=worker_id,
ephemeral_disks=ephemeral_disks,
persistent_disks=persistent_disks,
)
if wait_for_operation_secs > 0:
self.wait_for_status(
id=worker_id,
status=["ready"],
timeout_secs=wait_for_operation_secs,
)
print(worker_id)
def examples(self):
"""Show examples for working with k8sclusters."""
print(
dedent(
"""\
# Find id of k8s workers by ip address
$ hpecp k8sworker list --query "[*] | @[?contains('10.0.1.10 10.0.1.210', ipaddr)] | [*][_links.self.href]" --output text
/api/v2/worker/k8shost/5
/api/v2/worker/k8shost/7
# Retrieve the first master node of a K8S Cluster
$ hpecp k8scluster list --query "[?_links.self.href == '/api/v2/k8scluster/1'] | [0] | [k8shosts_config] | [0] | [?role == 'master'] | [0] | [node]" -o text
/api/v2/worker/k8shost/7
""" # noqa: E501
)
)
# TODO: verify with engineering if setup_log is a valid parameter
# def get(self, id, setup_log=False):
# """Get a K8SWorker."""
# if setup_log is True:
# params = {"setup_log": "true"}
# else:
# params = {}
# return super(K8sWorkerProxy, self).get(id=id, params=params)
@base.intercept_exception
def set_storage(
self,
id,
ephemeral_disks,
persistent_disks=None,
):
"""Set storage for a k8s worker.
Parameters
----------
id : str
The k8s worker ID
ephemeral_disks : str
Comma separated string containing ephemeral disks.
e.g: "/dev/nvme2n1,/dev/nvme2n2"
persistent_disks : str, optional
Comma separated string containing persistent disks, by default
None.
e.g: "/dev/nvme1n1,/dev/nvme1n2"
"""
if not ephemeral_disks:
print("'ephemeral_disks' must be provided", file=sys.stderr)
sys.exit(1)
p_disks = (
persistent_disks.split(",") if persistent_disks is not None else []
)
e_disks = ephemeral_disks.split(",")
base.get_client().k8s_worker.set_storage(
worker_id=id,
persistent_disks=p_disks,
ephemeral_disks=e_disks,
)
def statuses(
self,
):
"""Return a list of valid statuses."""
print([s.name for s in WorkerK8sStatus])
|
the-stack_0_11628 | from twython import Twython
def read_strings_from_file(file_path, how_many):
with open(file_path, 'r') as file:
data = file.read()
return data.split()[:how_many]
def read_key_and_secret(file_path):
return read_strings_from_file(file_path, 2)
def read_token_secret_pin(file_path):
return read_strings_from_file(file_path, 2)
def write_token_secret(file_path, token, secret):
with open(file_path, 'w') as file:
file.write("{}\n{}".format(token, secret))
def auth_app(key_file, auth_file):
app_key, app_secret = read_key_and_secret(key_file)
# obtaining URL for authentication
twitter = Twython(app_key, app_secret)
auth = twitter.get_authentication_tokens()
oauth_token = auth['oauth_token']
oauth_token_secret = auth['oauth_token_secret']
# request pin
print('Go here: {}'.format(auth['auth_url']))
pin = input('PIN? ')
# complete authorization with PIN
twitter = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
auth = twitter.get_authorized_tokens(pin)
oauth_token = auth['oauth_token']
oauth_token_secret = auth['oauth_token_secret']
# write token and secret to file
write_token_secret(auth_file, oauth_token, oauth_token_secret)
print('auth credentials written to: {}'.format(auth_file))
def twython_from_key_and_auth(key_file, auth_file):
app_key, app_secret = read_key_and_secret(key_file)
oauth_token, oauth_token_secret = read_token_secret_pin(auth_file)
return Twython(app_key, app_secret, oauth_token, oauth_token_secret)
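# Illustrative usage sketch; the file names below are assumptions, not part of this module.
if __name__ == "__main__":
    KEY_FILE = "app_key_secret.txt"       # contains the app key and app secret, whitespace separated
    AUTH_FILE = "oauth_token_secret.txt"  # written by auth_app() after PIN entry
    auth_app(KEY_FILE, AUTH_FILE)         # one-time interactive authorization
    twitter = twython_from_key_and_auth(KEY_FILE, AUTH_FILE)
    print(twitter.verify_credentials().get("screen_name"))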
|
the-stack_0_11632 | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import DATA,Float64Array,tdi
import sys
example = '/frame?tree=expt&shot=123&y=SIGNAL:NODE&x=0.0&frame_idx=0'
def doFrame(self):
def getStringExp(self,name,response_headers,_tdi):
if name in self.args:
try:
response_headers.append((name,str(_tdi(self.args[name][-1]).data())))
except Exception as e:
response_headers.append((name,"ERROR: %s"%(e,)))
response_headers = list()
response_headers.append(('Cache-Control','no-store, no-cache, must-revalidate'))
response_headers.append(('Pragma','no-cache'))
response_headers.append(('Content-Type','application/octet-stream'))
if 'tree' in self.args:
tree = self.openTree(self.args['tree'][-1],self.args['shot'][-1].split(',')[0])
_tdi = tree.tdiExecute
else:
tree = None
_tdi = tdi
for name in ('title','xlabel','ylabel'):
getStringExp(self,name,response_headers,_tdi)
if 'frame_idx' in self.args:
frame_idx = self.args['frame_idx'][-1]
else:
frame_idx = '0'
expr = self.args['y'][-1]
sig = _tdi('GetSegment(' + expr + ',' + frame_idx + ')')
frame_data = DATA(sig).evaluate()
response_headers.append(('FRAME_WIDTH',str(sig.getShape()[0])))
response_headers.append(('FRAME_HEIGHT',str(sig.getShape()[1])))
response_headers.append(('FRAME_BYTES_PER_PIXEL',str(frame_data.data().itemsize)))
response_headers.append(('FRAME_LENGTH',str(len(frame_data))))
output = str(frame_data.data().data)
if 'init' in self.args:
if 'x' in self.args:
expr = self.args['x'][-1]
            times = DATA(_tdi(expr)).evaluate()
else:
times = list()
numSegments = _tdi('GetNumSegments(' + expr + ')').data()
for i in range(0, numSegments):
times.append(_tdi('GetSegmentLimits(' + expr + ',' + str(i) + ')').data()[0])
times = Float64Array(times)
response_headers.append(('TIMES_DATATYPE',times.__class__.__name__))
response_headers.append(('TIMES_LENGTH',str(len(times))))
output = output + str(times.data().data)
status = '200 OK'
return (status, response_headers, output)
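# Illustrative request/response (signal path and values assumed): a GET such as
#   /frame?tree=expt&shot=123&y=\TOP:CAMERA&frame_idx=0&init=1
# returns the raw frame bytes plus headers like FRAME_WIDTH, FRAME_HEIGHT,
# FRAME_BYTES_PER_PIXEL and FRAME_LENGTH and, with init, TIMES_DATATYPE/TIMES_LENGTH.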
|
the-stack_0_11636 | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import messages
from django.http import HttpResponseRedirect
from cfbets.forms import SignUpForm, UserProfileForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from bets.models import ProposedBet, AcceptedBet, UserProfile
from common.stats import *
from django.contrib.auth.models import User
def welcome(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/bets/my_bets')
else:
return render(request, 'base_welcome.html')
def sign_up(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
# check the group id
group_id = request.POST.get('group_id')
if group_id != '' and group_id != 'cl3ms0n':
form.add_error('group_id', 'Not a valid group id.')
elif form.is_valid():
form.save()
new_user = authenticate(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
if new_user is not None:
login(request, new_user)
return HttpResponseRedirect("/")
else:
return HttpResponseRedirect("/login")
else:
form = SignUpForm()
return render(request, 'base_sign_up.html', {'form': form})
@login_required(login_url='/login/')
def profile(request):
# get the current user
current_user = User.objects.get(id=request.user.id)
# get the current user profile
current_user_profile = UserProfile.objects.get(user=current_user)
# all the form stuff
if request.method == 'POST':
user_profile_form = UserProfileForm(request.POST)
if user_profile_form.is_valid():
# save data for current user / user profile
current_user.first_name = user_profile_form.cleaned_data['first_name']
current_user.last_name = user_profile_form.cleaned_data['last_name']
current_user_profile.get_prop_bet_emails = user_profile_form.cleaned_data[
'get_prop_bet_emails']
current_user_profile.get_accepted_bet_emails = user_profile_form.cleaned_data[
'get_accepted_bet_emails']
current_user.save(update_fields=['first_name', 'last_name'])
current_user_profile.save(
update_fields=[
'get_prop_bet_emails',
'get_accepted_bet_emails'])
messages.success(request, 'Profile saved successfully.')
return HttpResponseRedirect("/profile")
else:
user_profile_form = UserProfileForm(
initial={
'first_name': current_user.first_name,
'last_name': current_user.last_name,
'email': current_user.email,
'get_prop_bet_emails': current_user_profile.get_prop_bet_emails,
'get_accepted_bet_emails': current_user_profile.get_accepted_bet_emails})
total_won_bets = get_total_wins(current_user)
total_loss_bets = get_total_losses(current_user)
total_tie_bets = get_total_ties(current_user)
return render(request,
'base_profile.html',
{'user_profile_form': user_profile_form,
'total_won_bets': total_won_bets,
'total_tie_bets': total_tie_bets,
'total_loss_bets': total_loss_bets})
|
the-stack_0_11637 | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from .base_vfi_dataset import BaseVFIDataset
from .registry import DATASETS
@DATASETS.register_module()
class VFIVimeo90K7FramesDataset(BaseVFIDataset):
"""Utilize Vimeo90K dataset (7 frames) for video frame interpolation.
Load 7 GT (Ground-Truth) frames from the dataset, predict several frame(s)
from other frames.
Then it applies specified transforms and finally returns a dict
containing paired data and other information.
It reads Vimeo90K keys from the txt file. Each line contains:
1. video frame folder
2. number of frames
3. image shape
Examples:
::
00001/0266 7 (256,448,3)
00001/0268 7 (256,448,3)
Note: Only `video frame folder` is required information.
Args:
folder (str | :obj:`Path`): Path to image folder.
ann_file (str | :obj:`Path`): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transformations.
input_frames (list[int]): Index of input frames.
target_frames (list[int]): Index of target frames.
test_mode (bool): Store `True` when building test dataset.
Default: `False`.
"""
def __init__(self,
folder,
ann_file,
pipeline,
input_frames,
target_frames,
test_mode=False):
super().__init__(
pipeline=pipeline,
folder=folder,
ann_file=ann_file,
test_mode=test_mode)
self.input_frames = input_frames
self.target_frames = target_frames
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load annoations for Vimeo-90K dataset.
Returns:
list[dict]: A list of dicts for paired paths and other information.
"""
# get keys
with open(self.ann_file, 'r') as fin:
keys = [line.strip().split(' ')[0] for line in fin]
data_infos = []
for key in keys:
key = key.replace('/', os.sep)
inputs_path = [
osp.join(self.folder, key, f'im{i}.png')
for i in self.input_frames
]
target_path = [
osp.join(self.folder, key, f'im{i}.png')
for i in self.target_frames
]
data_infos.append(
dict(
inputs_path=inputs_path, target_path=target_path, key=key))
return data_infos
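# A hypothetical mmediting-style config sketch for this dataset (paths and pipeline
# are assumptions, not part of this module):
#   train_dataset = dict(
#       type='VFIVimeo90K7FramesDataset',
#       folder='data/vimeo_septuplet/sequences',
#       ann_file='data/vimeo_septuplet/sep_trainlist.txt',
#       pipeline=train_pipeline,
#       input_frames=[1, 3, 5, 7],
#       target_frames=[2, 4, 6],
#       test_mode=False)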
|
the-stack_0_11639 | """ This process performs a restore of all the application entities from a
given restore.
"""
import argparse
import logging
import os
from appscale.common import appscale_info
from ..backup.datastore_restore import DatastoreRestore
from ..dbconstants import APP_ENTITY_SCHEMA
from ..dbconstants import APP_ENTITY_TABLE
from ..dbconstants import APP_KIND_SCHEMA
from ..dbconstants import APP_KIND_TABLE
from ..dbconstants import ASC_PROPERTY_TABLE
from ..dbconstants import COMPOSITE_SCHEMA
from ..dbconstants import COMPOSITE_TABLE
from ..dbconstants import DSC_PROPERTY_TABLE
from ..dbconstants import PROPERTY_SCHEMA
from ..utils import fetch_and_delete_entities
from ..zkappscale import zktransaction as zk
# Where to look to verify the app is deployed.
_APPS_LOCATION = '/var/apps/'
logger = logging.getLogger(__name__)
def init_parser():
""" Initializes the command line argument parser.
Returns:
A parser object.
"""
parser = argparse.ArgumentParser(
description='Restore application code and data.')
main_args = parser.add_argument_group('main args')
main_args.add_argument('-a', '--app-id', required=True,
help='The application ID to restore data under.')
main_args.add_argument('-b', '--backup-dir', required=True,
help='The backup directory to restore data from.')
main_args.add_argument('-c', '--clear-datastore', required=False,
action="store_true", default=False, help='Start with a clean datastore.')
main_args.add_argument('-d', '--debug', required=False, action="store_true",
default=False, help='Display debug messages.')
# TODO
# Read in source code location and owner and deploy the app
# before restoring data.
return parser
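# Illustrative invocation (script name and paths are assumptions):
#   python datastore_restore_data.py --app-id guestbook \
#     --backup-dir /opt/appscale/backups/guestbook --clear-datastore --debug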
def app_is_deployed(app_id, zk_client):
""" Looks for the app directory in the deployed apps location.
Args:
app_id: A str, the application ID.
Returns:
True on success, False otherwise.
"""
if not zk_client.exists('/appscale/projects/{}'.format(app_id)):
logger.error("Seems that \"{0}\" is not deployed.".format(app_id))
logger.info("Please deploy \"{0}\" and try again.".\
format(app_id))
return False
return True
def backup_dir_exists(backup_dir):
""" Checks it the given backup directory exists.
Args:
backup_dir: A str, the location of the backup directory containing all
backup files.
Returns:
True on success, False otherwise.
"""
if not os.path.exists(backup_dir):
logger.error("Error while accessing backup files.")
logger.info("Please provide a valid backup directory.")
return False
return True
def main():
""" This main function allows you to run the restore manually. """
# Parse CLI arguments.
parser = init_parser()
args = parser.parse_args()
# Set up logging.
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \
'%(lineno)s %(message)s ', level=level)
logger.info("Logging started")
logger.info(args)
zk_connection_locations = appscale_info.get_zk_locations_string()
zookeeper = zk.ZKTransaction(host=zk_connection_locations)
# Verify app is deployed.
if not app_is_deployed(args.app_id, zookeeper.handle):
return
# Verify backup dir exists.
if not backup_dir_exists(args.backup_dir):
return
if args.clear_datastore:
message = "Deleting \"{0}\" data...".\
format(args.app_id, args.backup_dir)
logger.info(message)
try:
tables_to_clear = {
APP_ENTITY_TABLE: APP_ENTITY_SCHEMA,
ASC_PROPERTY_TABLE: PROPERTY_SCHEMA,
DSC_PROPERTY_TABLE: PROPERTY_SCHEMA,
COMPOSITE_TABLE: COMPOSITE_SCHEMA,
APP_KIND_TABLE: APP_KIND_SCHEMA
}
for table, schema in tables_to_clear.items():
fetch_and_delete_entities('cassandra', table, schema, args.app_id, False)
except Exception as exception:
logger.error("Unhandled exception while deleting \"{0}\" data: {1} " \
"Exiting...".format(args.app_id, exception.message))
return
# Initialize connection to Zookeeper and database related variables.
db_info = appscale_info.get_db_info()
table = db_info[':table']
# Start restore process.
ds_restore = DatastoreRestore(args.app_id.strip('/'), args.backup_dir,
zookeeper, table)
try:
ds_restore.run()
finally:
zookeeper.close()
|
the-stack_0_11640 | """Helper sensor for calculating utility costs."""
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
from typing import Any, Final, Literal, TypeVar, cast
from homeassistant.components.sensor import (
ATTR_LAST_RESET,
DEVICE_CLASS_MONETARY,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.core import HomeAssistant, State, callback, split_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from .const import DOMAIN
from .data import EnergyManager, async_get_manager
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the energy sensors."""
manager = await async_get_manager(hass)
process_now = partial(_process_manager_data, hass, manager, async_add_entities, {})
manager.async_listen_updates(process_now)
if manager.data:
await process_now()
T = TypeVar("T")
@dataclass
class FlowAdapter:
"""Adapter to allow flows to be used as sensors."""
flow_type: Literal["flow_from", "flow_to"]
stat_energy_key: Literal["stat_energy_from", "stat_energy_to"]
entity_energy_key: Literal["entity_energy_from", "entity_energy_to"]
total_money_key: Literal["stat_cost", "stat_compensation"]
name_suffix: str
entity_id_suffix: str
FLOW_ADAPTERS: Final = (
FlowAdapter(
"flow_from",
"stat_energy_from",
"entity_energy_from",
"stat_cost",
"Cost",
"cost",
),
FlowAdapter(
"flow_to",
"stat_energy_to",
"entity_energy_to",
"stat_compensation",
"Compensation",
"compensation",
),
)
async def _process_manager_data(
hass: HomeAssistant,
manager: EnergyManager,
async_add_entities: AddEntitiesCallback,
current_entities: dict[tuple[str, str], EnergyCostSensor],
) -> None:
"""Process updated data."""
to_add: list[SensorEntity] = []
to_remove = dict(current_entities)
async def finish() -> None:
if to_add:
async_add_entities(to_add)
for key, entity in to_remove.items():
current_entities.pop(key)
await entity.async_remove()
if not manager.data:
await finish()
return
for energy_source in manager.data["energy_sources"]:
if energy_source["type"] != "grid":
continue
for adapter in FLOW_ADAPTERS:
for flow in energy_source[adapter.flow_type]:
# Opting out of the type complexity because can't get it to work
untyped_flow = cast(dict, flow)
# No need to create an entity if we already have a cost stat
if untyped_flow.get(adapter.total_money_key) is not None:
continue
# This is unique among all flow_from's
key = (adapter.flow_type, untyped_flow[adapter.stat_energy_key])
# Make sure the right data is there
# If the entity existed, we don't pop it from to_remove so it's removed
if untyped_flow.get(adapter.entity_energy_key) is None or (
untyped_flow.get("entity_energy_price") is None
and untyped_flow.get("number_energy_price") is None
):
continue
current_entity = to_remove.pop(key, None)
if current_entity:
current_entity.update_config(untyped_flow)
continue
current_entities[key] = EnergyCostSensor(
adapter,
manager.data["currency"],
untyped_flow,
)
to_add.append(current_entities[key])
await finish()
class EnergyCostSensor(SensorEntity):
"""Calculate costs incurred by consuming energy.
This is intended as a fallback for when no specific cost sensor is available for the
utility.
"""
def __init__(
self,
adapter: FlowAdapter,
currency: str,
flow: dict,
) -> None:
"""Initialize the sensor."""
super().__init__()
self._adapter = adapter
self.entity_id = f"{flow[adapter.entity_energy_key]}_{adapter.entity_id_suffix}"
self._attr_device_class = DEVICE_CLASS_MONETARY
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._attr_unit_of_measurement = currency
self._flow = flow
self._last_energy_sensor_state: State | None = None
def _reset(self, energy_state: State) -> None:
"""Reset the cost sensor."""
self._attr_state = 0.0
self._attr_last_reset = dt_util.utcnow()
self._last_energy_sensor_state = energy_state
self.async_write_ha_state()
@callback
def _update_cost(self) -> None:
"""Update incurred costs."""
energy_state = self.hass.states.get(
cast(str, self._flow[self._adapter.entity_energy_key])
)
if energy_state is None or ATTR_LAST_RESET not in energy_state.attributes:
return
try:
energy = float(energy_state.state)
except ValueError:
return
# Determine energy price
if self._flow["entity_energy_price"] is not None:
energy_price_state = self.hass.states.get(self._flow["entity_energy_price"])
if energy_price_state is None:
return
try:
energy_price = float(energy_price_state.state)
except ValueError:
return
else:
energy_price_state = None
energy_price = cast(float, self._flow["number_energy_price"])
if self._last_energy_sensor_state is None:
# Initialize as it's the first time all required entities are in place.
self._reset(energy_state)
return
cur_value = cast(float, self._attr_state)
if (
energy_state.attributes[ATTR_LAST_RESET]
!= self._last_energy_sensor_state.attributes[ATTR_LAST_RESET]
):
# Energy meter was reset, reset cost sensor too
self._reset(energy_state)
else:
# Update with newly incurred cost
old_energy_value = float(self._last_energy_sensor_state.state)
self._attr_state = cur_value + (energy - old_energy_value) * energy_price
self._last_energy_sensor_state = energy_state
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
energy_state = self.hass.states.get(self._flow[self._adapter.entity_energy_key])
if energy_state:
name = energy_state.name
else:
name = split_entity_id(self._flow[self._adapter.entity_energy_key])[
0
].replace("_", " ")
self._attr_name = f"{name} {self._adapter.name_suffix}"
self._update_cost()
# Store stat ID in hass.data so frontend can look it up
self.hass.data[DOMAIN]["cost_sensors"][
self._flow[self._adapter.entity_energy_key]
] = self.entity_id
@callback
def async_state_changed_listener(*_: Any) -> None:
"""Handle child updates."""
self._update_cost()
self.async_write_ha_state()
self.async_on_remove(
async_track_state_change_event(
self.hass,
cast(str, self._flow[self._adapter.entity_energy_key]),
async_state_changed_listener,
)
)
async def async_will_remove_from_hass(self) -> None:
"""Handle removing from hass."""
self.hass.data[DOMAIN]["cost_sensors"].pop(
self._flow[self._adapter.entity_energy_key]
)
await super().async_will_remove_from_hass()
@callback
def update_config(self, flow: dict) -> None:
"""Update the config."""
self._flow = flow
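# Worked example of the incremental update in _update_cost (numbers assumed):
# previous meter reading 10.0 kWh, new reading 12.5 kWh, price 0.30 per kWh
# -> cost increases by (12.5 - 10.0) * 0.30 = 0.75 in the configured currency.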
|
the-stack_0_11644 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates a flagfile containing a resign threshold flag
Reads the bigtables defined by the flags --cbt_{project, instance, table} to
compute the 95th percentile of the bleakest evaluations found in calibration
games, then updates the flagfile on the default bucket path, resetting that
value.
Recommended usage is via common flagfile (e.g. rl_loop/distributed_flags)
"""
import sys
import re
import os
import time
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
sys.path.insert(0, '.')
import mask_flags
import bigtable_input
import rl_loop.fsdb as fsdb
# Fun fact, this only helps for --helpshort. It's not a validator.
flags.adopt_module_key_flags(bigtable_input)
flags.adopt_module_key_flags(fsdb)
FLAGS = flags.FLAGS
RESIGN_FLAG_REGEX = re.compile(r'--resign_threshold=([-\d.]+)')
def get_95_percentile_bleak(games_nr, n_back=500):
"""Gets the 95th percentile of bleakest_eval from bigtable"""
end_game = int(games_nr.latest_game_number)
start_game = end_game - n_back if end_game >= n_back else 0
moves = games_nr.bleakest_moves(start_game, end_game)
evals = np.array([m[2] for m in moves])
return np.percentile(evals, 5)
def update_flagfile(flags_path, new_threshold):
"""Updates the flagfile at `flags_path`, changing the value for
`resign_threshold` to `new_threshold`
"""
if abs(new_threshold) > 1:
raise ValueError("Invalid new percentile for resign threshold")
with tf.gfile.GFile(flags_path) as f:
lines = f.read()
if new_threshold > 0:
new_threshold *= -1
if not RESIGN_FLAG_REGEX.search(lines):
print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path))
sys.exit(1)
old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1)
lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines)
if abs(float(old_threshold[0]) - new_threshold) < 0.001:
print("Not updating percentiles; {} ~= {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
else:
print("Updated percentile from {} to {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
with tf.gfile.GFile(flags_path, 'w') as f:
f.write(lines)
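# Example (values assumed): if the flagfile contains "--resign_threshold=-0.900" and the
# recomputed percentile is -0.95, the line is rewritten to "--resign_threshold=-0.950"
# (three decimal places); a change smaller than 0.001 is logged as "Not updating".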
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
games_nr = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table + '-nr')
while True:
new_pct = get_95_percentile_bleak(games_nr)
update_flagfile(fsdb.flags_path(), new_pct)
time.sleep(60 * 3)
if __name__ == '__main__':
valid_flags = list(map(lambda f: '--' + f, FLAGS.flag_values_dict().keys()))
valid_flags += ['--helpshort', '--helpfull', '--help']
parsed_flags = flags.FlagValues().read_flags_from_files(sys.argv[1:])
filtered_flags = mask_flags.filter_flags(parsed_flags, valid_flags)
print(filtered_flags, flush=True)
app.run(main, argv=sys.argv[:1] + filtered_flags)
|
the-stack_0_11647 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent import types as rlt
from reagent.core.configuration import param_hash
from reagent.core.dataclasses import dataclass
from reagent.model_utils.seq2slate_utils import (
DECODER_START_SYMBOL,
PADDING_SYMBOL,
Seq2SlateMode,
Seq2SlateOutputArch,
attention,
clones,
mask_logits_by_idx,
per_symbol_to_per_seq_log_probs,
per_symbol_to_per_seq_probs,
subsequent_mask,
)
from reagent.models.base import ModelBase
from reagent.torch_utils import gather
from torch.nn.parallel.distributed import DistributedDataParallel
logger = logging.getLogger(__name__)
class Generator(nn.Module):
""" Define standard linear + softmax generation step. """
def __init__(self, dim_model, candidate_size, temperature):
super(Generator, self).__init__()
self.dim_model = dim_model
self.candidate_size = candidate_size
self.temperature = temperature
def forward(self, mode, logits=None, tgt_in_idx=None, greedy=None):
if mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
return self._log_probs(logits, tgt_in_idx, mode)
elif mode == Seq2SlateMode.DECODE_ONE_STEP_MODE:
assert greedy is not None
return self._decode_one_step(logits, tgt_in_idx, greedy)
else:
raise NotImplementedError()
def _log_probs(self, logits, tgt_in_idx, mode):
"""
Return the log probability distribution at each decoding step
:param logits: logits of decoder outputs. Shape: batch_size, seq_len, candidate_size
        :param tgt_in_idx: the indices of candidates in decoder input sequences.
The first symbol is always DECODER_START_SYMBOL.
Shape: batch_size, seq_len
"""
assert mode in (
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
)
logits = mask_logits_by_idx(logits, tgt_in_idx)
# log_probs shape: batch_size, seq_len, candidate_size
log_probs = F.log_softmax(logits / self.temperature, dim=2)
return log_probs
def _decode_one_step(self, logits, tgt_in_idx, greedy):
"""
Decode one-step
:param logits: logits of decoder outputs. Shape: batch_size, seq_len, candidate_size
:param tgt_in_idx: input to the decoder, the first symbol is always the
starting symbol. Shape: batch_size, seq_len
:param greedy: whether to greedily pick or sample the next symbol
"""
batch_size = logits.shape[0]
# get the last step logits shape: batch_size, candidate_size
logits = logits[:, -1, :]
# invalidate the padding symbol and decoder-starting symbol
logits[:, :2] = float("-inf")
# invalidate symbols already appeared in decoded sequences
logits = logits.scatter(1, tgt_in_idx, float("-inf"))
prob = F.softmax(logits / self.temperature, dim=1)
if greedy:
_, next_candidate = torch.max(prob, dim=1)
else:
next_candidate = torch.multinomial(prob, num_samples=1, replacement=False)
next_candidate = next_candidate.reshape(batch_size, 1)
# next_candidate: the decoded symbols for the latest step
# shape: batch_size x 1
# prob: generative probabilities of the latest step
# shape: batch_size x candidate_size
return next_candidate, prob
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
"""
def __init__(self, dim_model):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(dim_model)
def forward(self, x, sublayer):
return x + sublayer(self.norm(x))
class Encoder(nn.Module):
"Core encoder is a stack of num_layers layers"
def __init__(self, layer, num_layers):
super(Encoder, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = nn.LayerNorm(layer.dim_model)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class EncoderLayer(nn.Module):
""" Encoder is made up of self-attn and feed forward """
def __init__(self, dim_model, self_attn, feed_forward):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(dim_model), 2)
self.dim_model = dim_model
def forward(self, src_embed, src_mask):
# src_embed shape: batch_size, seq_len, dim_model
# src_src_mask shape: batch_size, seq_len, seq_len
def self_attn_layer(x):
return self.self_attn(x, x, x, src_mask)
# attn_output shape: batch_size, seq_len, dim_model
attn_output = self.sublayer[0](src_embed, self_attn_layer)
# return shape: batch_size, seq_len, dim_model
return self.sublayer[1](attn_output, self.feed_forward)
class Decoder(nn.Module):
""" Generic num_layers layer decoder with masking."""
def __init__(self, layer, num_layers):
super(Decoder, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = nn.LayerNorm(layer.size)
def forward(self, x, memory, tgt_src_mask, tgt_tgt_mask):
# each layer is one DecoderLayer
for layer in self.layers:
x = layer(x, memory, tgt_src_mask, tgt_tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
""" Decoder is made of self-attn, src-attn, and feed forward """
def __init__(self, size, self_attn, src_attn, feed_forward):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size), 3)
def forward(self, x, m, tgt_src_mask, tgt_tgt_mask):
# x is target embedding or the output of previous decoder layer
# x shape: batch_size, seq_len, dim_model
# m is the output of the last encoder layer
# m shape: batch_size, seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
def self_attn_layer_tgt(x):
return self.self_attn(query=x, key=x, value=x, mask=tgt_tgt_mask)
def self_attn_layer_src(x):
return self.src_attn(query=x, key=m, value=m, mask=tgt_src_mask)
x = self.sublayer[0](x, self_attn_layer_tgt)
x = self.sublayer[1](x, self_attn_layer_src)
# return shape: batch_size, seq_len, dim_model
return self.sublayer[2](x, self.feed_forward)
class MultiHeadedAttention(nn.Module):
def __init__(self, num_heads, dim_model):
""" Take in model size and number of heads """
super(MultiHeadedAttention, self).__init__()
assert dim_model % num_heads == 0
# We assume d_v always equals d_k
self.d_k = dim_model // num_heads
self.num_heads = num_heads
self.linears = clones(nn.Linear(dim_model, dim_model), 4)
def forward(self, query, key, value, mask=None):
if mask is not None:
# Same mask applied to all num_heads heads.
# mask shape: batch_size, 1, seq_len, seq_len
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from dim_model => num_heads x d_k
# self.linear[0, 1, 2] is query weight matrix, key weight matrix, and
# value weight matrix, respectively.
# l(x) represents the transformed query matrix, key matrix and value matrix
# l(x) has shape (batch_size, seq_len, dim_model). You can think l(x) as
# the matrices from a one-head attention; or you can think
# l(x).view(...).transpose(...) as the matrices of num_heads attentions,
# each attention has d_k dimension.
query, key, value = [
l(x).view(nbatches, -1, self.num_heads, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
# x shape: batch_size, num_heads, seq_len, d_k
x, _ = attention(query, key, value, mask, self.d_k)
# 3) "Concat" using a view and apply a final linear.
# each attention's output is d_k dimension. Concat num_heads attention's outputs
# x shape: batch_size, seq_len, dim_model
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.num_heads * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, dim_model, dim_feedforward):
super(PositionwiseFeedForward, self).__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(dim_model, dim_feedforward),
torch.nn.ReLU(),
torch.nn.Linear(dim_feedforward, dim_model),
)
def forward(self, x):
return self.net(x)
class Embedder(nn.Module):
def __init__(self, dim_in, dim_out):
super(Embedder, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.linear = nn.Linear(self.dim_in, self.dim_out)
def forward(self, x):
# x: raw input features. Shape: batch_size, seq_len, dim_in
output = self.linear(x) * math.sqrt(self.dim_out)
# output shape: batch_size, seq_len, dim_out
return output
class PositionalEncoding(nn.Module):
def __init__(self, dim_model, max_len):
super(PositionalEncoding, self).__init__()
self.pos_embed = nn.Embedding(max_len, dim_model)
def forward(self, x):
device = x.device
batch_size, seq_len, _ = x.shape
position_idx = (
torch.arange(0, seq_len).unsqueeze(0).repeat(batch_size, 1).to(device)
)
x = x + self.pos_embed(position_idx)
return x
class BaselineNet(nn.Module):
def __init__(self, state_dim, dim_feedforward, num_stacked_layers):
super(BaselineNet, self).__init__()
nn_blocks = [nn.Linear(state_dim, dim_feedforward), nn.ReLU()]
assert num_stacked_layers >= 1
for _ in range(num_stacked_layers - 1):
nn_blocks.extend([nn.Linear(dim_feedforward, dim_feedforward), nn.ReLU()])
nn_blocks.append(nn.Linear(dim_feedforward, 1))
self.mlp = nn.Sequential(*nn_blocks)
def forward(self, input: rlt.PreprocessedRankingInput):
x = input.state.float_features
return self.mlp(x)
class Seq2SlateTransformerModel(nn.Module):
"""
A Seq2Slate network with Transformer. The network is essentially an
encoder-decoder structure. The encoder inputs a sequence of candidate feature
vectors and a state feature vector, and the decoder outputs an ordered
list of candidate indices. The output order is learned through REINFORCE
algorithm to optimize sequence-wise reward.
One application example is to rank candidate feeds to a specific user such
that the final list of feeds as a whole optimizes the user's engagement.
Seq2Slate paper: https://arxiv.org/abs/1810.02019
Transformer paper: https://arxiv.org/abs/1706.03762
    The model architecture can also adapt to some variations.
(1) The decoder can be autoregressive
(2) The decoder can take encoder scores and perform iterative softmax (aka frechet sort)
(3) No decoder and the output order is solely based on encoder scores
"""
def __init__(
self,
state_dim: int,
candidate_dim: int,
num_stacked_layers: int,
num_heads: int,
dim_model: int,
dim_feedforward: int,
max_src_seq_len: int,
max_tgt_seq_len: int,
output_arch: Seq2SlateOutputArch,
temperature: float = 1.0,
):
"""
:param state_dim: state feature dimension
:param candidate_dim: candidate feature dimension
:param num_stacked_layers: number of stacked layers in Transformer
:param num_heads: number of attention heads used in Transformer
:param dim_model: number of attention dimensions in Transformer
:param dim_feedforward: number of hidden units in FeedForward layers
in Transformer
:param max_src_seq_len: the maximum length of input sequences
:param max_tgt_seq_len: the maximum length of output sequences
:param output_arch: determines seq2slate output architecture
:param temperature: temperature used in decoder sampling
"""
super().__init__()
self.state_dim = state_dim
self.candidate_dim = candidate_dim
self.num_stacked_layers = num_stacked_layers
self.num_heads = num_heads
self.dim_model = dim_model
self.dim_feedforward = dim_feedforward
self.max_src_seq_len = max_src_seq_len
self.max_tgt_seq_len = max_tgt_seq_len
self.output_arch = output_arch
self._DECODER_START_SYMBOL = DECODER_START_SYMBOL
self._PADDING_SYMBOL = PADDING_SYMBOL
self._RANK_MODE = Seq2SlateMode.RANK_MODE
self._PER_SYMBOL_LOG_PROB_DIST_MODE = (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE
)
self._PER_SEQ_LOG_PROB_MODE = Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
self._DECODE_ONE_STEP_MODE = Seq2SlateMode.DECODE_ONE_STEP_MODE
self._ENCODER_SCORE_MODE = Seq2SlateMode.ENCODER_SCORE_MODE
c = copy.deepcopy
attn = MultiHeadedAttention(num_heads, dim_model)
ff = PositionwiseFeedForward(dim_model, dim_feedforward)
self.encoder = Encoder(
EncoderLayer(dim_model, c(attn), c(ff)), num_stacked_layers
)
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
# Compute score at each encoder step
self.encoder_scorer = nn.Linear(dim_model, 1)
# Generator needs to know the output symbol size,
# Possible output symbols include candidate indices, decoder-start symbol
# and padding symbol
self.generator = Generator(dim_model, max_src_seq_len + 2, temperature)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
self.decoder = Decoder(
DecoderLayer(dim_model, c(attn), c(attn), c(ff)), num_stacked_layers
)
self.decoder_logit_proj = nn.Linear(dim_model, max_src_seq_len + 2)
self.generator = Generator(dim_model, max_src_seq_len + 2, temperature)
elif self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
# Compute score at each encoder step
self.encoder_scorer = nn.Linear(dim_model, 1)
self.candidate_embedder = Embedder(candidate_dim, dim_model // 2)
self.state_embedder = Embedder(state_dim, dim_model // 2)
self.positional_encoding_encoder = PositionalEncoding(
dim_model, max_len=max_src_seq_len
)
self.positional_encoding_decoder = PositionalEncoding(
dim_model, max_len=max_tgt_seq_len
)
# Initialize parameters with Glorot / fan_avg.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self._print_model_info()
__constants__ = [
"state_dim",
"candidate_dim",
"num_stacked_layers",
"num_heads",
"dim_model",
"dim_feedforward",
"max_src_seq_len",
"max_tgt_seq_len",
"output_path",
"_DECODER_START_SYMBOL",
"_PADDING_SYMBOL",
"_RANK_MODE",
"_PER_SYMBOL_LOG_PROB_DIST_MODE",
"_PER_SEQ_LOG_PROB_MODE",
"_DECODE_ONE_STEP_MODE",
"_ENCODER_SCORE_MODE",
]
def _print_model_info(self):
def _num_of_params(model):
return len(torch.cat([p.flatten() for p in model.parameters()]))
logger.info(f"Num of total params: {_num_of_params(self)}")
logger.info(f"Num of Encoder params: {_num_of_params(self.encoder)}")
logger.info(
f"Num of Candidate Embedder params: {_num_of_params(self.candidate_embedder)}"
)
logger.info(
f"Num of State Embedder params: {_num_of_params(self.state_embedder)}"
)
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
logger.info(
f"Num of Encoder_Scorer params: {_num_of_params(self.encoder_scorer)}"
)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
logger.info(f"Num of Decoder params: {_num_of_params(self.decoder)}")
logger.info(
f"Num of Decoder Projection params: {_num_of_params(self.decoder_logit_proj)}"
)
elif self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
logger.info(
f"Num of Encoder_Scorer params: {_num_of_params(self.encoder_scorer)}"
)
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
"""
:param input: model input
:param mode: a string indicating which mode to perform.
"rank": return ranked actions and their generative probabilities.
"per_seq_log_probs": return generative log probabilities of given
tgt sequences (used for REINFORCE training)
"per_symbol_log_probs": return generative log probabilties of each
symbol in given tgt sequences (used in TEACHER FORCING and
DIFFERENTIABLE_REWARD training)
:param tgt_seq_len: the length of output sequence to be decoded. Only used
in rank mode
:param greedy: whether to sample based on softmax distribution or greedily
when decoding. Only used in rank mode
"""
if mode == self._RANK_MODE:
if tgt_seq_len is None:
tgt_seq_len = self.max_tgt_seq_len
return self._rank(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_seq_len=tgt_seq_len,
greedy=greedy,
)
elif mode in (self._PER_SEQ_LOG_PROB_MODE, self._PER_SYMBOL_LOG_PROB_DIST_MODE):
assert input.tgt_in_seq is not None
return self._log_probs(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
# pyre-fixme[16]: `Optional` has no attribute `float_features`.
tgt_in_seq=input.tgt_in_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_tgt_mask=input.tgt_tgt_mask,
tgt_in_idx=input.tgt_in_idx,
tgt_out_idx=input.tgt_out_idx,
mode=mode,
)
elif mode == self._ENCODER_SCORE_MODE:
assert self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE
return self.encoder_output_to_scores(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_out_idx=input.tgt_out_idx,
)
def _rank(self, state, src_seq, src_src_mask, tgt_seq_len, greedy):
""" Decode sequences based on given inputs """
device = src_seq.device
batch_size, src_seq_len, candidate_dim = src_seq.shape
candidate_size = src_seq_len + 2
# candidate_features is used as look-up table for candidate features.
# the second dim is src_seq_len + 2 because we also want to include
# features of start symbol and padding symbol
candidate_features = torch.zeros(
batch_size, src_seq_len + 2, candidate_dim, device=device
)
# TODO: T62502977 create learnable feature vectors for start symbol
# and padding symbol
candidate_features[:, 2:, :] = src_seq
# memory shape: batch_size, src_seq_len, dim_model
memory = self.encode(state, src_seq, src_src_mask)
ranked_per_symbol_probs = torch.zeros(
batch_size, tgt_seq_len, candidate_size, device=device
)
ranked_per_seq_probs = torch.zeros(batch_size, 1)
if self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
# encoder_scores shape: batch_size, src_seq_len
encoder_scores = self.encoder_scorer(memory).squeeze(dim=2)
tgt_out_idx = torch.argsort(encoder_scores, dim=1, descending=True)[
:, :tgt_seq_len
]
# +2 to account for start symbol and padding symbol
tgt_out_idx += 2
# every position has propensity of 1 because we are just using argsort
ranked_per_symbol_probs = ranked_per_symbol_probs.scatter(
2, tgt_out_idx.unsqueeze(2), 1.0
)
ranked_per_seq_probs[:, :] = 1.0
return ranked_per_symbol_probs, ranked_per_seq_probs, tgt_out_idx
tgt_in_idx = (
torch.ones(batch_size, 1, device=device)
.fill_(self._DECODER_START_SYMBOL)
.type(torch.long)
)
assert greedy is not None
for l in range(tgt_seq_len):
tgt_in_seq = gather(candidate_features, tgt_in_idx)
tgt_src_mask = src_src_mask[:, : l + 1, :]
# shape batch_size, l + 1, candidate_size
logits = self.decode(
memory=memory,
state=state,
tgt_src_mask=tgt_src_mask,
tgt_in_seq=tgt_in_seq,
tgt_tgt_mask=subsequent_mask(l + 1, device),
tgt_seq_len=l + 1,
)
# next candidate shape: batch_size, 1
# prob shape: batch_size, candidate_size
next_candidate, prob = self.generator(
mode=self._DECODE_ONE_STEP_MODE,
logits=logits,
tgt_in_idx=tgt_in_idx,
greedy=greedy,
)
ranked_per_symbol_probs[:, l, :] = prob
tgt_in_idx = torch.cat([tgt_in_idx, next_candidate], dim=1)
# remove the decoder start symbol
# tgt_out_idx shape: batch_size, tgt_seq_len
tgt_out_idx = tgt_in_idx[:, 1:]
ranked_per_seq_probs = per_symbol_to_per_seq_probs(
ranked_per_symbol_probs, tgt_out_idx
)
# ranked_per_symbol_probs shape: batch_size, tgt_seq_len, candidate_size
# ranked_per_seq_probs shape: batch_size, 1
# tgt_out_idx shape: batch_size, tgt_seq_len
return ranked_per_symbol_probs, ranked_per_seq_probs, tgt_out_idx
def _log_probs(
self,
state,
src_seq,
tgt_in_seq,
src_src_mask,
tgt_tgt_mask,
tgt_in_idx,
tgt_out_idx,
mode,
):
"""
Compute log of generative probabilities of given tgt sequences
(used for REINFORCE training)
"""
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq, src_src_mask)
tgt_seq_len = tgt_in_seq.shape[1]
src_seq_len = src_seq.shape[1]
assert tgt_seq_len <= src_seq_len
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
tgt_src_mask = src_src_mask[:, :tgt_seq_len, :]
# decoder_logits shape: batch_size, tgt_seq_len, candidate_size
decoder_logits = self.decode(
memory=encoder_output,
state=state,
tgt_src_mask=tgt_src_mask,
tgt_in_seq=tgt_in_seq,
tgt_tgt_mask=tgt_tgt_mask,
tgt_seq_len=tgt_seq_len,
)
# log_probs shape:
# if mode == PER_SEQ_LOG_PROB_MODE: batch_size, 1
# if mode == PER_SYMBOL_LOG_PROB_DIST_MODE: batch_size, tgt_seq_len, candidate_size
log_probs = self._decoder_logits_to_log_probs(
decoder_logits, tgt_in_idx, tgt_out_idx, mode
)
return log_probs
def _decoder_logits_to_log_probs(self, logits, tgt_in_idx, tgt_out_idx, mode):
"""
:param logits: the logits from the decoder, with shape:
(batch_size, seq_len, candidate_size)
:param tgt_in_idx: input idx to the decoder, the first symbol is
always the DECODER_START_SYMBOL. Shape: batch_size x seq_len
:param tgt_out_idx: output idx of the decoder. Shape: batch_size x seq_len
:param mode: return log prob distribution per symbol or reduce them per sequence
"""
assert mode in (
self._PER_SEQ_LOG_PROB_MODE,
self._PER_SYMBOL_LOG_PROB_DIST_MODE,
)
# per_symbol_log_probs: log probability distribution of each symbol
# shape: batch_size, seq_len, candidate_size
per_symbol_log_probs = self.generator(
mode=mode, logits=logits, tgt_in_idx=tgt_in_idx
)
if mode == self._PER_SYMBOL_LOG_PROB_DIST_MODE:
return per_symbol_log_probs
# shape: batch_size, 1
return per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx)
def encoder_output_to_scores(self, state, src_seq, src_src_mask, tgt_out_idx):
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq, src_src_mask)
# encoder_output shape: batch_size, src_seq_len, dim_model
# tgt_out_idx shape: batch_size, tgt_seq_len
batch_size, tgt_seq_len = tgt_out_idx.shape
# order encoder_output by tgt_out_idx
# slate_encoder_output shape: batch_size, tgt_seq_len, dim_model
slate_encoder_output = gather(encoder_output, tgt_out_idx - 2)
# encoder_scores shape: batch_size, tgt_seq_len
return self.encoder_scorer(slate_encoder_output).squeeze()
def encode(self, state, src_seq, src_mask):
# state: batch_size, state_dim
# src_seq: batch_size, src_seq_len, dim_candidate
        # src_mask shape: batch_size, src_seq_len, src_seq_len
batch_size = src_seq.shape[0]
# candidate_embed: batch_size, src_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(src_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# transform state_embed into shape: batch_size, src_seq_len, dim_model/2
state_embed = state_embed.repeat(1, self.max_src_seq_len).reshape(
batch_size, self.max_src_seq_len, -1
)
# Input at each encoder step is actually concatenation of state_embed
# and candidate embed. state_embed is replicated at each encoding step.
# src_embed shape: batch_size, src_seq_len, dim_model
src_embed = self.positional_encoding_encoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# encoder_output shape: batch_size, src_seq_len, dim_model
return self.encoder(src_embed, src_mask)
def decode(
self, memory, state, tgt_src_mask, tgt_in_seq, tgt_tgt_mask, tgt_seq_len
):
# memory is the output of the encoder, the attention of each input symbol
# memory shape: batch_size, src_seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
        # tgt_in_seq shape: batch_size, tgt_seq_len, dim_candidate
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
batch_size, src_seq_len, _ = memory.shape
candidate_size = src_seq_len + 2
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
# encoder_scores shape: batch_size, src_seq_len
encoder_scores = self.encoder_scorer(memory).squeeze(dim=2)
logits = torch.zeros(batch_size, tgt_seq_len, candidate_size).to(
encoder_scores.device
)
logits[:, :, :2] = float("-inf")
logits[:, :, 2:] = encoder_scores.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, src_seq_len
)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
# candidate_embed shape: batch_size, tgt_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(tgt_in_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# state_embed: batch_size, tgt_seq_len, dim_model/2
state_embed = state_embed.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, -1
)
# tgt_embed: batch_size, tgt_seq_len, dim_model
tgt_embed = self.positional_encoding_decoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# output of decoder will be later transformed into probabilities over symbols.
# shape: batch_size, tgt_seq_len, dim_model
decoder_output = self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask)
# logits shape: batch_size, seq_len, candidate_size
logits = self.decoder_logit_proj(decoder_output)
return logits
@dataclass
class Seq2SlateNet(ModelBase):
__hash__ = param_hash
state_dim: int
candidate_dim: int
num_stacked_layers: int
dim_model: int
max_src_seq_len: int
max_tgt_seq_len: int
output_arch: Seq2SlateOutputArch
temperature: float
def __post_init_post_parse__(self) -> None:
super(Seq2SlateNet, self).__init__()
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
self.seq2slate = self._build_model()
def _build_model(self):
return None
def input_prototype(self):
return rlt.PreprocessedRankingInput.from_tensors(
state=torch.randn(1, self.state_dim),
src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim),
tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
src_src_mask=torch.ones(1, self.max_src_seq_len, self.max_src_seq_len),
tgt_tgt_mask=torch.ones(1, self.max_tgt_seq_len, self.max_tgt_seq_len),
slate_reward=torch.randn(1),
)
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
res = self.seq2slate(input, mode=mode, tgt_seq_len=tgt_seq_len, greedy=greedy)
if mode == Seq2SlateMode.RANK_MODE:
return rlt.RankingOutput(
ranked_per_symbol_probs=res[0],
ranked_per_seq_probs=res[1],
ranked_tgt_out_idx=res[2],
)
elif mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
return rlt.RankingOutput(log_probs=res)
elif mode == Seq2SlateMode.ENCODER_SCORE_MODE:
return rlt.RankingOutput(encoder_scores=res)
else:
raise NotImplementedError()
def get_distributed_data_parallel_model(self):
return _DistributedSeq2SlateNet(self)
@dataclass
class Seq2SlateTransformerNet(Seq2SlateNet):
__hash__ = param_hash
num_heads: int
dim_feedforward: int
def _build_model(self):
return Seq2SlateTransformerModel(
state_dim=self.state_dim,
candidate_dim=self.candidate_dim,
num_stacked_layers=self.num_stacked_layers,
num_heads=self.num_heads,
dim_model=self.dim_model,
dim_feedforward=self.dim_feedforward,
max_src_seq_len=self.max_src_seq_len,
max_tgt_seq_len=self.max_tgt_seq_len,
output_arch=self.output_arch,
temperature=self.temperature,
)
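# Usage sketch (not part of the original module): how a Seq2SlateTransformerNet
# is typically constructed and queried in RANK_MODE. The hyperparameter values
# below are arbitrary illustrations, and the call follows the conventions of
# forward() above rather than a verified end-to-end run.
#
#   net = Seq2SlateTransformerNet(
#       state_dim=4, candidate_dim=8, num_stacked_layers=2, dim_model=16,
#       max_src_seq_len=5, max_tgt_seq_len=5,
#       output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE, temperature=1.0,
#       num_heads=2, dim_feedforward=32,
#   )
#   ranking = net(net.input_prototype(), mode=Seq2SlateMode.RANK_MODE,
#                 tgt_seq_len=5, greedy=True)
#   # ranking.ranked_tgt_out_idx has shape (batch_size, tgt_seq_len)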
class _DistributedSeq2SlateNet(ModelBase):
def __init__(self, seq2slate_net: Seq2SlateNet):
super().__init__()
current_device = torch.cuda.current_device()
self.data_parallel = DistributedDataParallel(
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
seq2slate_net.seq2slate,
device_ids=[current_device],
output_device=current_device,
)
self.seq2slate_net = seq2slate_net
def input_prototype(self):
return self.seq2slate_net.input_prototype()
def cpu_model(self):
return self.seq2slate_net.cpu_model()
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
res = self.data_parallel(
input, mode=mode, tgt_seq_len=tgt_seq_len, greedy=greedy
)
if mode == Seq2SlateMode.RANK_MODE:
return rlt.RankingOutput(
ranked_per_symbol_probs=res[0],
ranked_per_seq_probs=res[1],
ranked_tgt_out_idx=res[2],
)
elif mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
return rlt.RankingOutput(log_probs=res)
elif mode == Seq2SlateMode.ENCODER_SCORE_MODE:
return rlt.RankingOutput(encoder_scores=res)
else:
raise NotImplementedError()
|
the-stack_0_11648 | # -*- coding: utf-8 -*-
"""Vertical structure functions for ROMS
:func:`sdepth`
Depth of s-levels
:func:`zslice`
Slice a 3D field in s-coordinates to fixed depth
:func:`multi_zslice`
Slice a 3D field to several depth levels
:func:`z_average`
Vertical average of a 3D field
:func:`s_stretch`
Compute vertical stretching arrays Cs_r or Cs_w
"""
# -----------------------------------
# Bjørn Ådlandsvik <[email protected]>
# Institute of Marine Research
# Bergen, Norway
# 2010-09-30
# -----------------------------------
from __future__ import absolute_import, division
import numpy as np
def sdepth(H, Hc, C, stagger="rho", Vtransform=1):
"""Depth of s-levels
*H* : arraylike
Bottom depths [meter, positive]
*Hc* : scalar
Critical depth
    *C* : 1D array
s-level stretching curve
*stagger* : [ 'rho' | 'w' ]
*Vtransform* : [ 1 | 2 ]
defines the transform used, defaults 1 = Song-Haidvogel
Returns an array with ndim = H.ndim + 1 and
    shape = C.shape + H.shape with the depths of the
mid-points in the s-levels.
Typical usage::
>>> fid = Dataset(roms_file)
>>> H = fid.variables['h'][:, :]
>>> C = fid.variables['Cs_r'][:]
>>> Hc = fid.variables['hc'].getValue()
>>> z_rho = sdepth(H, Hc, C)
"""
H = np.asarray(H)
Hshape = H.shape # Save the shape of H
H = H.ravel() # and make H 1D for easy shape maniplation
C = np.asarray(C)
N = len(C)
outshape = (N,) + Hshape # Shape of output
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N # Unstretched coordinates
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vtransform == 1: # Default transform by Song and Haidvogel
A = Hc * (S - C)[:, None]
B = np.outer(C, H)
return (A + B).reshape(outshape)
elif Vtransform == 2: # New transform by Shchepetkin
N = Hc * S[:, None] + np.outer(C, H)
D = 1.0 + Hc / H
return (N / D).reshape(outshape)
else:
raise ValueError("Unknown Vtransform")
# ------------------------------------
def sdepth_w(H, Hc, cs_w):
"""Return depth of w-points in s-levels
Kept for backwards compatibility
use *sdepth(H, Hc, cs_w, stagger='w')* instead
"""
return sdepth(H, Hc, cs_w, stagger="w")
# ------------------------------------------
# Vertical slicing e.t.c.
# ------------------------------------------
def zslice(F, S, z):
"""Vertical slice of a 3D ROMS field
Vertical interpolation of a field in s-coordinates to
(possibly varying) depth level
*F* : array with vertical profiles, first dimension is vertical
*S* : array with depths of the F-values,
*z* : Depth level(s) for output, scalar or ``shape = F.shape[1:]``
The z values should be negative
Return value : array, `shape = F.shape[1:]`, the vertical slice
Example:
H is an array of depths (positive values)
Hc is the critical depth
C is 1D containing the s-coordinate stretching at rho-points
returns F50, interpolated values at 50 meter with F50.shape = H.shape
>>> z_rho = sdepth(H, Hc, C)
>>> F50 = zslice(F, z_rho, -50.0)
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
z = np.asarray(z, dtype="float")
Fshape = F.shape # Save original shape
if S.shape != Fshape:
raise ValueError("F and z_r must have same shape")
if z.shape and z.shape != Fshape[1:]:
raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F.size // N # Combined length of horizontal dimension(s)
F = F.reshape((N, M))
S = S.reshape((N, M))
if z.shape:
z = z.reshape((M,))
# Find integer array C with shape (M,)
# with S[C[i]-1, i] < z <= S[C[i], i]
# C = np.apply_along_axis(np.searchsorted, 0, S, z)
# but the following is much faster
C = np.sum(S < z, axis=0)
C = C.clip(1, N - 1)
# For vectorisation
# construct index array tuples D and Dm such that
# F[D][i] = F[C[i], i]
# F[Dm][i] = F[C[i]-1, i]
I = np.arange(M, dtype="int")
D = (C, I)
Dm = (C - 1, I)
# Compute interpolation weights
A = (z - S[Dm]) / (S[D] - S[Dm])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the linear interpolation
R = (1 - A) * F[Dm] + A * F[D]
    # Give the result the correct shape
R = R.reshape(Fshape[1:])
return R
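# Illustrative sketch (not part of the original module): a self-contained
# zslice call on synthetic data, so no ROMS file is needed. The bottom depth,
# stretching curve and field values are all made up for the example.
def _example_zslice():
    H = np.full((10, 20), 100.0)        # bottom depth [m], positive
    C = np.linspace(-1.0, 0.0, 30)      # simple (unstretched) s-curve, 30 levels
    z_rho = sdepth(H, 10.0, C)          # depths of rho-points, shape (30, 10, 20)
    F = np.random.rand(30, 10, 20)      # some field on the s-levels
    return zslice(F, z_rho, -50.0)      # field at 50 m depth, shape (10, 20)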
# -----------------------------------------------
def multi_zslice(F, S, Z):
"""Slice a 3D ROMS field to fixed depth
Vertical interpolation of a field in s-coordinates to
fixed vertical level
*F* : array of with vertical profiles, first dimension is vertical
*S* : array with depth of s-levels (at rho-points)
1D (constant depth) or S.shape = F.shape
*Z* : single depth value, negative
Returns : array, ``shape = F.shape[1:]`` the vertical slice
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
Fshape = F.shape # Save original shape
# Flat all dimensions after first
N = F.shape[0]
M = F.size // N
F = F.reshape((N, M))
S = S.reshape((N, M))
# Make z.shape = (M,)
Z = np.asarray(Z, dtype="float")
# Valid possibilities
# 1) Z = single scalar (shape = ()), one constant value
# 2) Z = 1D array, shape=(kmax), a set of constant depths
# 3) Z = 2D or more, reshapeable to (kmax, M)
if Z.ndim == 0:
Z = Z + np.zeros((1, M))
kmax = 1
elif Z.ndim == 1:
kmax = Z.size
Z = Z[:, np.newaxis] + np.zeros((kmax, M))
else:
kmax = Z.size // M
Z = Z.reshape((kmax, M))
# Find C, C.shape = (kmax, M) such that
# z_r[C[k,i]-1, i] < Z[k] <= z_r[C[k,i], i]
# shape: kmax, N, M => kmax, M
C = np.sum(S[np.newaxis, :, :] < Z[:, np.newaxis, :], axis=1)
C = C.clip(1, N - 1)
# Horizontal index
I = np.arange(M, dtype=int)
# Compute interpolation weights
A = (Z - S[(C - 1, I)]) / (S[(C, I)] - S[(C - 1, I)])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the interpolation
R = (1 - A) * F[(C - 1, I)] + A * F[(C, I)]
# Give the result the correct shape
R = R.reshape((kmax,) + Fshape[1:])
return R
# ------------------------------------------------------
def z_average(F, z_r, z0, z1):
"""Slice a 3D ROMS field to fixed depth
Vertical interpolation of a field in s-coordinates to
fixed vertical level
*F* : array
Vertical profiles, first dimension is vertical
*z_r* : array
Depth of s-levels (at rho-points), requires `z_r.shape = F.shape`
*z0*, *z1* : floats
Single depth values with z0 <= z1 <= 0
return value : array
`shape = F.shape[1:]`, the vertical average
"""
F = np.asarray(F)
z_r = np.asarray(z_r)
Fshape = F.shape # Save original shape
# Flatten all dimensions after first
N = F.shape[0]
M = F.size // N
F = F.reshape((N, M))
z_r = z_r.reshape((N, M))
# z0, z1 are scalars or horizontal arrays
z0 = np.asarray(z0)
if z0.shape: # Array, must be 2D
z0 = z0.reshape((M,))
z1 = np.asarray(z1)
if z1.shape:
z1 = z1.reshape((M,))
# Bracket z0, i.e.
# Find integer array C0 with shape (M,)
# with z_r[C0[i]-1, i] < z0 <= z_r[C0[i], i]
# Can be done with:
# C0 = np.apply_along_axis(np.searchsorted, 0, z_r, z0)
# but the following is much faster
C0 = np.sum(z_r < z0, axis=0)
C0 = C0.clip(1, N - 1) # Clip to avoid illegal indices
# Bracket z1
C1 = np.sum(z_r < z1, axis=0)
C1 = C1.clip(1, N - 1)
# Use advanced indexing for vectorisation
# F[(C0,I)][i] = F[C0[i], i]
I = np.arange(M, dtype="int")
# Interpolate F to the two levels
A0 = (z0 - z_r[(C0 - 1, I)]) / (z_r[(C0, I)] - z_r[(C0 - 1, I)])
A0 = A0.clip(0.0, 1.0) # Control the extrapolation
F0 = (1 - A0) * F[(C0 - 1, I)] + A0 * F[(C0, I)]
A1 = (z1 - z_r[(C1 - 1, I)]) / (z_r[(C1, I)] - z_r[(C1 - 1, I)])
A1 = A1.clip(0.0, 1.0)
F1 = (1 - A1) * F[(C1 - 1, I)] + A1 * F[(C1, I)]
# Find indices again (unclipped)
C0 = np.sum(z_r < z0, axis=0)
C1 = np.sum(z_r < z1, axis=0)
R = np.zeros(M, dtype=np.float64)
X = np.zeros(N + 2, dtype=np.float64)
Y = np.zeros(N + 2, dtype=np.float64)
z0 = z0 + R # Make sure they are spatial arrays
z1 = z1 + R # For indexing below
for i in I:
X[:] = 0.0
Y[:] = 0.0
nz = C1[i] - C0[i] # Number of rho-points between z0 and z1
# Set up arrays for trapezoidal integration
X[0] = z0[i]
X[1 : nz + 1] = z_r[C0[i] : C1[i], i]
X[nz + 1] = z1[i]
Y[0] = F0[i]
Y[1 : nz + 1] = F[C0[i] : C1[i], i]
Y[nz + 1] = F1[i]
# Perform the integration
R[i] = 0.5 * np.dot(
X[1 : nz + 2] - X[0 : nz + 1], Y[1 : nz + 2] + Y[0 : nz + 1]
)
# Compute average and revert to correct shape
R = R / (z1 - z0)
R = R.reshape(Fshape[1:])
return R
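# Illustrative sketch (not part of the original module): average a synthetic
# field between 50 m and 10 m depth; note the requirement z0 <= z1 <= 0.
def _example_z_average():
    H = np.full((10, 20), 100.0)        # bottom depth [m], positive
    C = np.linspace(-1.0, 0.0, 30)      # simple (unstretched) s-curve, 30 levels
    z_rho = sdepth(H, 10.0, C)          # depths of rho-points, shape (30, 10, 20)
    F = np.random.rand(30, 10, 20)      # some field on the s-levels
    return z_average(F, z_rho, -50.0, -10.0)   # shape (10, 20)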
# ----------------------------------
def s_stretch(N, theta_s, theta_b, stagger="rho", Vstretching=1):
"""Compute a s-level stretching array
*N* : Number of vertical levels
*theta_s* : Surface stretching factor
*theta_b* : Bottom stretching factor
*stagger* : "rho"|"w"
*Vstretching* : 1|2|3|4|5
"""
# if stagger == "rho":
# S = -1.0 + (0.5 + np.arange(N)) / N
# elif stagger == "w":
# S = np.linspace(-1.0, 0.0, N + 1)
if stagger == "rho":
K = np.arange(0.5, N)
elif stagger == "w":
K = np.arange(N + 1)
else:
raise ValueError("stagger must be 'rho' or 'w'")
S = -1 + K / N
if Vstretching == 1:
cff1 = 1.0 / np.sinh(theta_s)
cff2 = 0.5 / np.tanh(0.5 * theta_s)
return (1.0 - theta_b) * cff1 * np.sinh(theta_s * S) + theta_b * (
cff2 * np.tanh(theta_s * (S + 0.5)) - 0.5
)
elif Vstretching == 2:
a, b = 1.0, 1.0
Csur = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
Cbot = np.sinh(theta_b * (S + 1)) / np.sinh(theta_b) - 1
mu = (S + 1) ** a * (1 + (a / b) * (1 - (S + 1) ** b))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 3:
gamma_ = 3.0
Csur = -np.log(np.cosh(gamma_ * (-S) ** theta_s)) / np.log(np.cosh(gamma_))
# Csur = -np.log(np.cosh(gamma_ * np.abs(S) ** theta_s)) / np.log(np.cosh(gamma_))
Cbot = (
np.log(np.cosh(gamma_ * (S + 1) ** theta_b)) / np.log(np.cosh(gamma_)) - 1
)
mu = 0.5 * (1 - np.tanh(gamma_ * (S + 0.5)))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 4:
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
elif Vstretching == 5:
S1 = (K * K - 2 * K * N + K + N * N - N) / (N * N - N)
S2 = (K * K - K * N) / (1 - N)
S = -S1 - 0.01 * S2
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
else:
raise ValueError("Unknown Vstretching")
# wrapper for backwards compatibility
def s_stretch_w(N, theta_s, theta_b, Vstretching=1):
"""Obsolete use *s_stretch* instead"""
return s_stretch(N, theta_s, theta_b, stagger="w", Vstretching=Vstretching)
|
the-stack_0_11652 | import subprocess
import os
import urllib.request
import sys
from typing import Optional
from conapp.file_paths import get_snapshot_filename
from conapp.validate import validate_subprocess
from conapp.definitions import USER_HOME_DIR, DEFAULT_STRIP_COMPONENTS
def apply_config(file_name: str) -> None:
"""
A wrapper around apply snapshot but for stripping the top level
:param file_name:
:return:
"""
return apply_snapshot(file_name, True)
def apply_snapshot(file_name: str, strip_top_level=False) -> None:
"""Given file_name use tar to apply it to the users home directory"""
if not os.path.isfile(file_name):
print(f"Error! attempted to apply nonexistent snapshot {file_name}")
print(f"Applying snapshot {file_name}")
validate_subprocess(
subprocess.run([
'tar',
'-C',
USER_HOME_DIR,
DEFAULT_STRIP_COMPONENTS if strip_top_level else '',
'--show-transformed-names',
'-zvxf',
file_name,
])
)
def create_snapshot(file_name: str) -> Optional[str]:
file_names_result = get_files_from_tar(file_name, True)
files = list(
filter(
lambda file_path: os.path.isfile(os.path.expanduser(f"~/{file_path}")),
file_names_result.stdout.split()
)
)
if len(files) > 0:
snapshot_name = get_snapshot_filename()
backup_command = [
'tar',
'-C',
USER_HOME_DIR,
'-czvf',
snapshot_name,
] + files
print(f"Local files would get overridden, creating backup of: {' '.join(files)}")
validate_subprocess(subprocess.run(
backup_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
))
print(f"Successfully backed up files to {snapshot_name}")
return snapshot_name
else:
print("No files will be overridden, not creating backup")
return None
def get_files_from_tar(file_name: str, strip_top_level=False) -> subprocess.CompletedProcess:
get_file_names_command = [
"tar",
DEFAULT_STRIP_COMPONENTS if strip_top_level else '',
'--show-transformed-names',
'-tf',
file_name
]
result = subprocess.run(
get_file_names_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
validate_subprocess(result)
return result
def download_file(file_name: str, url: str) -> None:
"""Attempt to download a file or exit"""
try:
print(f"Attempting to download {url}")
urllib.request.urlretrieve(url, file_name)
print(f"Success, downloaded to {file_name}")
except urllib.request.HTTPError as ex:
print(f"Error occurred, does {url} exist?\n{ex}")
sys.exit(-1)
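# Usage sketch (not part of the original module): the typical flow is to back
# up any files the archive would overwrite, then apply it. The archive path
# below is hypothetical.
#
#   archive = "/tmp/dotfiles.tar.gz"
#   backup = create_snapshot(archive)   # returns None if nothing is overridden
#   apply_config(archive)               # extracts into the user's home directory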
|
the-stack_0_11653 | from django.conf.urls import url
from . import views
app_name = 'polls'
urlpatterns = [
# ex: /polls/
url(r'^$', views.IndexView.as_view(), name='index'),
# ex: /polls/5/
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
# ex: /polls/5/results/
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
|
the-stack_0_11655 | from typing import Tuple
import PIL
import torch
import torchvision.transforms as transforms
from .datasets import DATASET_STATS, SUPPORTED_DATASETS
from .gaussian_blur import GaussianBlur
class SimCLRDataTransform:
"""Applies augmentations to sample two times, as described in SimCLR paper"""
def __init__(self, transform: transforms.Compose):
self.transform = transform
def __call__(self, sample: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
xi = self.transform(sample)
xj = self.transform(sample)
return xi, xj
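# Illustrative sketch (not part of the original module): SimCLRDataTransform
# applies the same stochastic pipeline twice, producing two views of one image.
# The pipeline below is a minimal stand-in, not the full SimCLR augmentation.
def _example_two_views():
    import numpy as np
    from PIL import Image
    pipeline = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    two_views = SimCLRDataTransform(pipeline)
    image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    xi, xj = two_views(image)   # two independently augmented 3x32x32 tensors
    return xi, xj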
class ContrastiveAugmentor:
"""Applies augmentation for contrastive learning, as in SimCLR paper"""
def __init__(self, dataset: str, input_size: Tuple[int, int, int]):
"""
Args:
dataset: dataset to apply augmentations to
input_size: input image size
Raises:
ValueError: if specified dataset is unsupported
"""
if dataset not in SUPPORTED_DATASETS:
raise ValueError('Unsupported dataset')
stats = DATASET_STATS[dataset]
h, w = input_size[:2]
size = (h, w)
blur_kernel_size = 2 * int(.05 * h) + 1
color = transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)
augmentations = transforms.Compose([
transforms.Resize(size, interpolation=PIL.Image.LANCZOS),
transforms.RandomResizedCrop(size=size, interpolation=PIL.Image.LANCZOS),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([color], p=0.8),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(kernel_size=blur_kernel_size),
transforms.ToTensor(),
transforms.Normalize(mean=stats['mean'], std=stats['std'])
])
self._augmentations = SimCLRDataTransform(augmentations)
def __call__(self, sample: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return self._augmentations(sample)
class ValidAugmentor:
"""Applies augmentation for validation and testing"""
def __init__(self, dataset: str, input_size: Tuple[int, int, int]):
"""
Args:
dataset: dataset to apply augmentations to
input_size: input image size
Raises:
ValueError: if specified dataset is unsupported
"""
if dataset not in SUPPORTED_DATASETS:
raise ValueError('Unsupported dataset')
stats = DATASET_STATS[dataset]
h, w = input_size[:2]
size = (h, w)
self._augmentations = transforms.Compose([
transforms.Resize(size=size),
transforms.ToTensor(),
transforms.Normalize(mean=stats['mean'], std=stats['std'])
])
def __call__(self, sample: torch.Tensor) -> torch.Tensor:
return self._augmentations(sample)
class PatchAugmentor:
"""Applies augmentations to patch"""
def __init__(self, input_size: Tuple[int, int, int]):
"""
Args:
input_size: input image size
"""
h, w = input_size[:2]
size = (h, w)
self._augmentations = transforms.Compose([
transforms.Resize(size=size),
transforms.ToTensor()
])
def __call__(self, sample: torch.Tensor) -> torch.Tensor:
return self._augmentations(sample)
|
the-stack_0_11656 | import reveallib
import reveallib64
from utils import *
from multiprocessing.pool import Pool
import signal
import os
import math
import argparse
import logging
import intervaltree
import matplotlib
import sortedcontainers
import time
def plot(plt,anchors,sep,wait=True,nc='r',rc='g',color=None,edges=False,lines=False,alpha=1,args=None):
if len(anchors)==0:
return
if len(anchors[0])==2: #unaligned blocks
for start,stop in anchors:
ax = plt.axes()
if start<sep: #ref
ax.add_patch(
matplotlib.patches.Rectangle(
(start, 0), #bottom left
stop-start, #width
sep, #height #should be qry length!
alpha=.25,
color="blue"
)
)
else:
ax.add_patch(
matplotlib.patches.Rectangle(
(0, start-sep), #bottom left
sep, #width
stop-start, #height
alpha=.25,
color="grey"
)
)
elif len(anchors[0])==3: #mums
for l,sps,revcomp in anchors:
if revcomp:
plt.plot( (sps[0],sps[0]+l), ((sps[1]-sep)+l, (sps[1]-sep)),'%s-'%rc,alpha=alpha)
else:
plt.plot( (sps[0],sps[0]+l), ((sps[1]-sep), (sps[1]-sep)+l),'%s-'%nc,alpha=alpha)
elif len(anchors[0])==4: #synteny blocks, without orientation
for anchor in anchors:
s1,e1,s2,e2=anchor
ax = plt.axes()
ax.add_patch(
matplotlib.patches.Rectangle(
(s1, s2-sep), #bottom left
e1-s1, #width
e2-s2, #height
alpha=.5,
color=color
)
)
elif len(anchors[0])==5: #synteny blocks with orientation
for anchor in anchors:
s1,e1,s2,e2,revcomp=anchor
ax = plt.axes()
ax.add_patch(
matplotlib.patches.Rectangle(
(s1, s2-sep), #bottom left
e1-s1, #width
e2-s2, #height
alpha=.25,
color="green" if revcomp else "red"
)
)
elif len(anchors[0])==8: #synteny blocks with score and ctg
if edges:
for c in [0,2]:
anchors.sort(key=lambda a:a[c])
xedges,yedges=[],[]
panchor=None
for anchor in anchors:
s1,e1,s2,e2,revcomp,score,ref,ctg=anchor
if panchor!=None:
ps1,pe1,ps2,pe2,prevcomp,pscore,pref,pctg=panchor
if pctg!=ctg and pref!=ref:
panchor=anchor
continue
if c==0:
xedges.append(pe1)
xedges.append(s1)
xedges.append(None)
if prevcomp:
yedges.append(ps2-sep)
else:
yedges.append(pe2-sep)
if revcomp:
yedges.append(e2-sep)
else:
yedges.append(s2-sep)
yedges.append(None)
else:
if prevcomp:
xedges.append(ps1)
else:
xedges.append(pe1)
if revcomp:
xedges.append(e1)
else:
xedges.append(s1)
xedges.append(None)
yedges.append(pe2-sep)
yedges.append(s2-sep)
yedges.append(None)
panchor=anchor
if c==0:
plt.plot(xedges,yedges,'b--',alpha=alpha)
else:
plt.plot(xedges,yedges,'y--',alpha=alpha)
if lines:
rcxpoints,xpoints=[],[]
rcypoints,ypoints=[],[]
for anchor in anchors:
s1,e1,s2,e2,revcomp,score,ref,ctg=anchor
# plt.text(s1+((e1-s1)/2),(s2-sep)+(((e2-sep)-(s2-sep))/2) ,str(anchor),fontsize=6)
if revcomp:
# plt.plot((s1,e1), (e2-sep,s2-sep),'g-')
rcxpoints.append(s1)
rcxpoints.append(e1)
rcxpoints.append(None)
rcypoints.append(e2-sep)
rcypoints.append(s2-sep)
rcypoints.append(None)
else:
# plt.plot((s1,e1), (s2-sep,e2-sep),'r-')
xpoints.append(s1)
xpoints.append(e1)
xpoints.append(None)
ypoints.append(s2-sep)
ypoints.append(e2-sep)
ypoints.append(None)
plt.plot(xpoints,ypoints,'r-' if color==None else '%s-'%color,alpha=alpha)
plt.plot(rcxpoints,rcypoints,'g-' if color==None else '%s-'%color,alpha=alpha)
else: #plot squares
for anchor in anchors:
s1,e1,s2,e2,revcomp,score,ref,ctg=anchor
ax = plt.axes()
ax.add_patch(
matplotlib.patches.Rectangle(
(s1, s2-sep), #bottom left
e1-s1, #width
e2-s2, #height
alpha=.25,
color="green" if revcomp else "red"
)
)
if wait:
plt.show()
else:
plt.draw()
def addctginfo(mums,ctg2range):
logging.debug("Augment contig information.")
#add ref information to mums
mums.sort(key=lambda m: m[1][0]) #sort mums by ref domain
intvidx=0
for i in range(len(mums)):
while mums[i][1][0]>ctg2range[intvidx][1]:
intvidx+=1
mums[i]=mums[i]+(intvidx,)
#add contig information to mums
mums.sort(key=lambda m: m[1][1]) #sort mums by query domain
intvidx=0
for i in range(len(mums)):
while mums[i][1][1]>ctg2range[intvidx][1]:
intvidx+=1
mums[i]=mums[i]+(intvidx,)
logging.debug("Done.")
return mums
def transform_cmd(args):
for qry in args.contigs:
logging.info("Running transform for %s"%qry)
transform(args,qry)
logging.info("Done")
def transform(args,qry):
if not args.interactive:
matplotlib.use("agg")
if args.plot:
from matplotlib import pyplot as plt
if args.output==None:
prefix=os.path.splitext(os.path.basename(qry))[0]
else:
prefix=args.output
refnames=[]
ctgnames=[]
if args.sa64:
idx=reveallib64.index()
else:
idx=reveallib.index()
ctg2range=[]
for sample in [args.reference[0],qry]:
idx.addsample(os.path.basename(sample))
for name,seq in fasta_reader(sample, cutN=args.cutn):
if len(seq)<args.minctglength:
logging.debug("Skip transform for contig: %s"%name)
continue
intv=idx.addsequence(seq)
ctg2range.append(intv)
if sample==args.reference[0]:
refnames.append(name)
else:
ctgnames.append(name)
T=idx.T
logging.info("Compute mums.")
idx.construct(rc=False)
mums=addctginfo(idx.getmums(args.minlength),ctg2range)
logging.info("Done, %d mums."%len(mums))
if args.cluster:
logging.info("Cluster mums by diagonals.")
blocks=clustermumsbydiagonal(mums,maxdist=args.maxdist,minclustsize=args.mincluster,rcmums=False)
logging.info("Done, %d clusters."%len(blocks))
else:
blocks=[(mum[1][0], mum[1][0]+mum[0], mum[1][1], mum[1][1]+mum[0], mum[2], mum[0], mum[3], mum[4]) for mum in mums]
# rcidx=idx.copy()
# rcidx.construct(rc=True)
# mums+=rcidx.getmums(args.minlength)
logging.info("Compute RC mums.")
idx.construct(rc=True)
rcmums=addctginfo(idx.getmums(args.minlength),ctg2range)
logging.info("Done, %d rc mums."%len(rcmums))
sep=idx.nsep[0]
idxn=idx.n
rlength=idx.nsep[0]
qlength=idxn-idx.nsep[0]
del idx
if args.cluster:
logging.info("Cluster rc mums by anti-diagonals.")
if len(rcmums)==0:
rcblocks = [(mum[1][0], mum[1][0] + mum[0], mum[1][1], mum[1][1] + mum[0], mum[2], mum[0], mum[3], mum[4]) for mum in rcmums]
else:
rcblocks=clustermumsbydiagonal(rcmums,maxdist=args.maxdist,minclustsize=args.mincluster,rcmums=True)
logging.info("Done, %d rc clusters."%len(rcblocks))
else:
rcblocks=[(mum[1][0], mum[1][0]+mum[0], mum[1][1], mum[1][1]+mum[0], mum[2], mum[0], mum[3], mum[4]) for mum in rcmums]
blocks+=rcblocks
if args.plot:
plot(plt,blocks,sep,wait=False,lines=True,alpha=0.2,args=args)
# if args.plot:
# plot(blocks,sep,wait=False,lines=True)
logging.info("Start glocal chaining for filtering anchors (reference).")
# blocks.sort(key=lambda b: b[1]-b[0])
# logging.info("Largest ref block: %s"%str(blocks[-1]))
# minbacktrack=blocks[-1][1]-blocks[-1][0]
nbefore=len(blocks)
syntenyblocks=blocks
nafter=None
refiteration=0
# maxiter=1
while nbefore!=nafter:# or refiteration==maxiter:
logging.info("Glocal chain iteration %d"%refiteration)
nbefore=len(syntenyblocks)
syntenyblocks=glocalchain(syntenyblocks,rlength,qlength,ctg2range,rearrangecost=args.rearrangecost,
inversioncost=args.inversioncost,
_lambda=args._lambda,
eps=args.eps,
useheap=args.useheap,
lastn=args.lastn,
lastbp=args.lastbp,
alfa=args.alfa,
gapopen=args.gapopen,
axis=0)
nafter=len(syntenyblocks)
logging.info("Anchor before chaining: %s"%nbefore)
logging.info("Anchor after chaining: %s"%nafter)
refiteration+=1
if args.plot:
plot(plt,syntenyblocks,sep,wait=False,lines=True,color='k',alpha=.7)
logging.info("%d anchors remain after glocal chaining (reference)."%len(syntenyblocks))
logging.info("Start glocal chaining for filtering anchors (query).")
nbefore=len(syntenyblocks)
nafter=None
qryiteration=0
while nbefore!=nafter:# or qryiteration==maxiter:
logging.info("Glocal chain iteration %d"%qryiteration)
nbefore=len(syntenyblocks)
syntenyblocks=glocalchain(syntenyblocks,rlength,qlength,ctg2range,rearrangecost=args.rearrangecost,
inversioncost=args.inversioncost,
_lambda=args._lambda,
eps=args.eps,
useheap=args.useheap,
lastn=args.lastn,
lastbp=args.lastbp,
alfa=args.alfa,
gapopen=args.gapopen,
axis=1)
nafter=len(syntenyblocks)
logging.info("Anchor before chaining: %s"%nbefore)
logging.info("Anchor after chaining: %s"%nafter)
qryiteration+=1
# G=localcolinearchains(syntenyblocks,rlength,qlength,rearrangecost=rearrangecost,inversioncost=inversioncost)
# chain,rcchain=colinearchains(syntenyblocks,rlength,qlength)
logging.info("%d anchors remain after glocal chaining (query)."%len(syntenyblocks))
if args.plot:
plot(plt,syntenyblocks,sep,wait=False,lines=True,color='b',alpha=.7)
#take the intersection of both the chains
# logging.info("Determine intersection between the chains...")
# syntenyblocks=list(set(rsyntenyblocks) & set(qsyntenyblocks))
# logging.info("Done. %d chains remain."%len(qsyntenyblocks))
# logging.info("Remove anchors that are contained in other clusters."
# syntenyblocks=remove_contained_blocks(blocks)
# logging.info("Done, %d anchors remain."%len(syntenyblocks))
# logging.info("Done.")
logging.info("Merge consecutive blocks.")
syntenyblocks=merge_consecutive(syntenyblocks)
logging.info("%d blocks after merging consecutive blocks."%len(syntenyblocks))
# if args.plot:
# plot(syntenyblocks,sep,wait=True,lines=True,color='b')
# logging.info("Merge consecutive blocks.")
# syntenyblocks=merge_consecutive(syntenyblocks)
# logging.info("%d blocks after merging consecutive blocks."%len(syntenyblocks))
if args.greedy:
logging.info("Assign overlap between MUMs in a greedy manner.")
syntenyblocks=remove_overlap_greedy_blocks(syntenyblocks)
logging.info("Done.")
else:
logging.info("Assign overlap between MUMs in a conservative manner.")
syntenyblocks=remove_overlap_conservative_blocks(syntenyblocks)
logging.info("Done.")
logging.info("Remove all blocks that are shorter than minchainsum (%d)."%args.minchainsum)
syntenyblocks=[b for b in syntenyblocks if b[5] >= args.minchainsum]
logging.info("%d blocks after filtering for minchainsum."%len(syntenyblocks))
logging.info("Merge consecutive blocks.")
syntenyblocks=merge_consecutive(syntenyblocks)
logging.info("%d blocks after merging consecutive blocks."%len(syntenyblocks))
if args.optimise and len(syntenyblocks)>1:
weight,cost,edgecosts=chainscore(syntenyblocks, rlength, qlength, ctg2range,rearrangecost=args.rearrangecost,inversioncost=args.inversioncost,_lambda=args._lambda,eps=args.eps,alfa=args.alfa,gapopen=args.gapopen) #determine the actual cost of the glocal chain
score=weight-cost
assert(len(edgecosts) == len(syntenyblocks)+1)
iteration=0
while True:
iteration+=1
logging.info("Optimise chain, iteration %d."%iteration)
tsyntenyblocks,tweight,tcost,tedgecosts=optimise(syntenyblocks,rlength, qlength, ctg2range,rearrangecost=args.rearrangecost,inversioncost=args.inversioncost,_lambda=args._lambda,eps=args.eps,alfa=args.alfa,gapopen=args.gapopen)
nscore=tweight-tcost
if nscore<=score:
break
else:
score=nscore
syntenyblocks=tsyntenyblocks
weight=tweight
cost=tcost
edgecosts=tedgecosts
syntenyblocks=merge_consecutive(syntenyblocks)
logging.info("Done. %d blocks after optimisation."%len(syntenyblocks))
syntenyblocks=merge_consecutive(syntenyblocks)
weight,cost,edgecosts=chainscore(syntenyblocks, rlength, qlength, ctg2range,rearrangecost=args.rearrangecost,inversioncost=args.inversioncost,_lambda=args._lambda,eps=args.eps,alfa=args.alfa,gapopen=args.gapopen) #determine the actual cost of the glocal chain
score=weight-cost
assert(len(edgecosts) == len(syntenyblocks)+1)
if args.outputbed: #before extending to the edges of the contig, output the breakpoint regions
logging.info("Write bedfile with contig mappings on reference to: %s.bed"%prefix)
with open(prefix+".bed",'w') as bedout:
block2ctgidx=dict()
pctgid=None
ctgid2lastblock=dict()
ci=0
syntenyblocks.sort(key=lambda b: b[2]) #sort by query
for i,block in enumerate(syntenyblocks): #sorted by query
s1,e1,s2,e2,o,score,refid,ctgid=block
if ctgid!=pctgid:
if pctgid!=None:
ctgid2lastblock[pctgid]=ci
ci=0
else:
ci+=1
block2ctgidx[block]=ci
pctgid=ctgid
ctgid2lastblock[pctgid]=ci
syntenyblocks.sort(key=lambda b: b[0]) #sort by reference
bedout.write("#reference\trefbegin\trefend\tcontig:segmentidx:lastsegmentidx:begin:end\tscore:cost\torientation\taln-start\taln-end\n")
pblock=None
for i,block in enumerate(syntenyblocks): #sorted by reference
s1,e1,s2,e2,o,score,refid,ctgid=block
if i>0:
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=pblock
else:
pblock=None
cost=edgecosts[i] #cost to connect to pblock to block
if i<len(syntenyblocks)-2:
nblock=syntenyblocks[i+1]
ns1,ne1,ns2,ne2,no,nscore,nrefid,nctgid=nblock
else:
nblock=None
ctgoffsets=ctg2range[ctgid]
refoffsets=ctg2range[refid]
if pblock!=None and prefid==refid:
start=(s1-refoffsets[0])-((s1-pe1)/2)
else:
start=s1-refoffsets[0]
if nblock!=None and nrefid==refid:
end=(e1-refoffsets[0])+((ns1-e1)/2)
else:
end=e1-refoffsets[0]
qstart=s2-ctgoffsets[0]
qend=e2-ctgoffsets[0]
chromname=refnames[refid].split()[0]
qi=block2ctgidx[block]
bedout.write("%s\t%d\t%d\t%s:%d:%d:%d:%d\t%d:%d\t%s\t%d\t%d\n"%(chromname, #chrom
start, #start
end, #end
ctgnames[ctgid-len(refnames)].split()[0], #name, make sure there's no whitespace to comply with bed 'format'
qi,
ctgid2lastblock[ctgid],
qstart,
qend,
score,
cost,
'+' if o==False else '-', #strand
s1-refoffsets[0], #thick start
e1-refoffsets[0]) #thick end
#itemRgb
#blockCount
#blockSizes
#blockStarts
)
#bedout.write("%s\t%d\t%d\t%s\t%s\t%s\t%s\n"%(refnames[refid], pe1-refoffsets[0], s1-refoffsets[0], ctgnames[ctgid-len(refnames)], ctgnames[pctgid-len(refnames)], 'n' if po==False else 'r', 'n' if o==False else 'r'))
pblock=block
if args.plot:
plot(plt,syntenyblocks,sep,wait=False,args=args)
logging.debug("Extend %d blocks to query borders."%len(syntenyblocks))
extendblocks(syntenyblocks,ctg2range)
logging.debug("Done.")
if args.plot:
for start,end in ctg2range:
if start<sep:
plt.axvline(x=start, ymin=0, ymax=idxn-sep, linewidth=.1, linestyle='solid')
else:
plt.axhline(y=start-sep, xmin=0, xmax=sep, linewidth=.1, linestyle='solid')
plot(plt,syntenyblocks,sep,wait=False,edges=False,args=args)
plt.xlim(0,rlength)
plt.ylim(0,qlength)
if args.interactive:
plt.show()
else:
plt.savefig("%s.png"%(prefix))
plt.clf()
#determine the subset of mappable contigs from ref and qry
mappablectgs=set()
for s1,e1,s2,e2,o,score,refid,ctgid in syntenyblocks:
mappablectgs.add(ctgid)
mappablectgs.add(refid)
if len(mappablectgs)!=0:
logging.info("Write breakpoint graph to: %s.gfa"%prefix)
write_breakpointgraph(syntenyblocks,T,refnames,ctgnames,mappablectgs,prefix)
else:
logging.info("No mappable contigs.")
def clustermumsbydiagonal(mums,maxdist=90,minclustsize=65,rcmums=True):
logging.debug("Sorting anchors by diagonals...")
if rcmums:
mums.sort(key=lambda m: (m[1][0]+(m[1][1]+m[0]), m[1][0]-(m[1][1]+m[0])) ) #sort mums by anti-diagonal, then diagonal
else:
mums.sort(key=lambda m: (m[1][0]-m[1][1], m[1][0]+m[1][1])) #sort mums by diagonal, then anti-diagonal
logging.debug("Done.")
l,sps,rc,ctg,ref=mums[0]
clusters=[(sps[0],sps[0]+l,sps[1],sps[1]+l,rc,l,ctg,ref)]
update_progress(0,len(mums))
for i in xrange(1,len(mums)):
update_progress(i,len(mums))
l,sps,rc,ctg,ref=mums[i]
s1,e1,s2,e2,prc,score,pctg,pref=clusters[-1]
if rcmums:
d=mums[i][1][0]+(mums[i][1][1]+mums[i][0])
pd=e1+s2
else:
d=mums[i][1][0]-mums[i][1][1]
pd=s1-s2
if d==pd and pctg==ctg and pref==ref: #same diagonal and same contigs
dist=mums[i][1][0]-e1
assert(dist>=0)
if dist < maxdist:
if rc==0:
clusters[-1]=(s1,sps[0]+l,s2,sps[1]+l,rc,score+l,ctg,ref)
else:
clusters[-1]=(s1,sps[0]+l,sps[1],e2,rc,score+l,ctg,ref)
else:
clusters.append((sps[0],sps[0]+l,sps[1],sps[1]+l,rc,l,ctg,ref))
else:
clusters.append((sps[0],sps[0]+l,sps[1],sps[1]+l,rc,l,ctg,ref))
return [c for c in clusters if c[5]>=minclustsize]
def write_breakpointgraph(syntenyblocks,T,refnames,ctgnames,mappablectgs,outputprefix):
#build a breakpoint graph, that we can write to GFA
G=nx.MultiDiGraph()
start=uuid.uuid4().hex
end=uuid.uuid4().hex
G.graph['startnodes']=[start]
G.graph['endnodes']=[end]
G.graph['paths']=[]
G.graph['path2id']={}
G.graph['id2path']={}
G.add_node(start,offsets=dict())
G.add_node(end,offsets=dict())
pid=0
for name in refnames:
if pid in mappablectgs:
# name=os.path.splitext(os.path.basename(reference))[0]+"_"+name
name=os.path.basename(outputprefix+"_"+name)
G.graph['paths'].append(name)
G.graph['path2id'][name]=pid
G.graph['id2path'][pid]=name
G.node[start]['offsets'][pid]=0
else:
logging.info("No contigs were mapped to: %s"%name)
pid+=1
for name in ctgnames:
if pid in mappablectgs:
name="*"+name #prefix so we can recognise the two paths afterwards
G.graph['paths'].append(name)
G.graph['path2id'][name]=pid
G.graph['id2path'][pid]=name
G.node[start]['offsets'][pid]=0
else:
logging.info("Contig: %s could not be uniquely placed on the reference"%name)
pid+=1
#write the reference layout of the query sequences
syntenyblocks.sort(key=lambda b: b[0]) #TODO: check if not already the case..
prefid=None
pnid=None
l=0
mapping=dict()
nid=0
for i,block in enumerate(syntenyblocks):
s1,e1,s2,e2,o,score,refid,ctgid=block
mapping[(s2,e2)]=nid
if refid!=prefid:
if prefid!=None:
G.add_edge(pnid,end,paths=set([prefid]),ofrom="+", oto="+")
pnid=start
l=0
if o==0:
G.add_node(nid,seq=T[s2:e2],offsets={refid:l})
else:
G.add_node(nid,seq=rc(T[s2:e2]),offsets={refid:l})
G.add_edge(pnid,nid,paths=set([refid]),ofrom="+", oto="+")
prefid=refid
pnid=nid
nid+=1
l+=e2-s2
if i!=len(syntenyblocks)-1: #add gap node, so we later know which bubbles are caused by gaps in the assembly
gapsize=1 #TODO: if specified use reference to add a gap
G.add_node(nid,seq="N"*gapsize,offsets={refid:l})
l+=gapsize
G.add_edge(pnid,nid,paths=set([refid]),ofrom="+", oto="+")
pnid=nid
nid+=1
G.add_edge(pnid,end,paths=set([refid]),ofrom="+", oto="+")
writeorg=True
if writeorg: #write the original layout of the query sequences, so we can reconstruct the input afterwards
syntenyblocks.sort(key=lambda b: b[2])
pctgid=None
pnid=None
l=0
for nid,block in enumerate(syntenyblocks):
s1,e1,s2,e2,o,score,refid,ctgid=block
nid=mapping[(s2,e2)]
if ctgid!=pctgid:
if pctgid!=None:
G.add_edge(pnid,end,paths=set([pctgid]),ofrom="+" if o==0 else "-", oto="+")
pnid=start
l=0
po=0
G.node[nid]['offsets'][ctgid]=l
l+=e2-s2
G.add_edge(pnid,nid,paths=set([ctgid]),ofrom="+" if po==0 else "-", oto="+" if o==0 else "-")
po=o
pctgid=ctgid
pnid=nid
G.add_edge(pnid,end,paths=set([ctgid]),ofrom="+" if o==0 else "-", oto="+")
write_gfa(G,None,outputfile=outputprefix if outputprefix.endswith(".gfa") else outputprefix+".gfa")
def merge_consecutive(syntenyblocks):
if len(syntenyblocks)<2:
return syntenyblocks
#first merge consecutive blocks in the chain
syntenyblocks.sort(key=lambda s: s[0]) #order by ref position
qryorder = sorted(xrange(len(syntenyblocks)), key= lambda i: syntenyblocks[i][2]) #qry order
qryorder_inv = sorted(xrange(len(syntenyblocks)), key=qryorder.__getitem__) #inverse qry order
head=0
for ri in xrange(1,len(syntenyblocks)):
pblock=syntenyblocks[ri-1]
block=syntenyblocks[ri]
pqi=qryorder_inv[ri-1] #index within the qryorder of pblock
qi=qryorder_inv[ri] #index within the qryorder of block
        ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=pblock #previous block on reference
s1,e1,s2,e2,o,score,refid,ctgid=block
es1,ee1,es2,ee2,eo,escore,erefid,ectgid=syntenyblocks[head]
if ctgid==pctgid:
if pqi+1==qi and o==po==0:
syntenyblocks[head]=(es1,e1,es2,e2,eo,escore+score,erefid,ectgid)
elif pqi-1==qi and o==po==1:
syntenyblocks[head]=(es1,e1,s2,ee2,eo,escore+score,erefid,ectgid)
else:
head+=1
syntenyblocks[head]=block
else:
head+=1
syntenyblocks[head]=block
while head!=ri:#len(syntenyblocks)-1:
syntenyblocks.pop()
head+=1
return syntenyblocks
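# Illustrative sketch (not part of the original module): two co-linear anchors
# on the same reference/query contig collapse into one block with summed score.
# Block layout is (s1, e1, s2, e2, orientation, score, refid, ctgid); the
# coordinates below are made up for the example.
def _example_merge_consecutive():
    blocks = [
        (0, 100, 1000, 1100, 0, 100, 0, 1),
        (100, 200, 1100, 1200, 0, 100, 0, 1),
    ]
    return merge_consecutive(blocks)  # -> [(0, 200, 1000, 1200, 0, 200, 0, 1)]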
def extendblocks(syntenyblocks,ctg2range):
syntenyblocks.sort(key=lambda s: s[0]) #order by reference position
for i in xrange(len(syntenyblocks)):
s1,e1,s2,e2,o,score,ref,ctg=syntenyblocks[i]
if i==0: #first
s1=ctg2range[ref][0]
else:
ps1,pe1,ps2,pe2,po,pscore,pref,pctg=syntenyblocks[i-1]
if pref==ref:
s1=pe1
else:
s1=ctg2range[ref][0]
if i==len(syntenyblocks)-1: #last
e1=ctg2range[ref][1]
else:
ns1,ne1,ns2,ne2,no,nscore,nref,nctg=syntenyblocks[i+1]
if nref==ref:
e1+=((ns1-e1)/2)
else:
e1=ctg2range[ref][1]
assert(s1<e1)
syntenyblocks[i]=(s1,e1,s2,e2,o,score,ref,ctg)
syntenyblocks.sort(key=lambda s: s[2]) #order by qry position
for i in xrange(len(syntenyblocks)):
s1,e1,s2,e2,o,score,ref,ctg=syntenyblocks[i]
if i==0: #first
s2=ctg2range[ctg][0]
else:
ps1,pe1,ps2,pe2,po,pscore,pref,pctg=syntenyblocks[i-1]
if pctg==ctg:
s2=pe2
else:
s2=ctg2range[ctg][0]
if i==len(syntenyblocks)-1: #last
e2=ctg2range[ctg][1]
else:
ns1,ne1,ns2,ne2,no,nscore,nref,nctg=syntenyblocks[i+1]
if nctg==ctg:
e2+=((ns2-e2)/2)
else:
e2=ctg2range[ctg][1]
assert(s2<e2)
syntenyblocks[i]=(s1,e1,s2,e2,o,score,ref,ctg)
def optimise(syntenyblocks,rlength, qlength, ctg2range,rearrangecost=1000,inversioncost=1,_lambda=5,eps=1,alfa=1,gapopen=10):
orgchain=sorted(syntenyblocks,key=lambda c: c[5])
maxchain=syntenyblocks
maxchain_weight,maxchain_cost,maxchain_edgecosts=chainscore(maxchain, rlength, qlength, ctg2range, rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,alfa=alfa,gapopen=gapopen)
maxchainscore=maxchain_weight-maxchain_cost
stack=[]
loglevel=logging.getLogger().getEffectiveLevel()
if loglevel>logging.DEBUG:
update_progress(0,len(orgchain))
for i in xrange(len(orgchain)):
if loglevel>logging.DEBUG:
update_progress(i,len(orgchain))
tmp=list(stack+orgchain[i+1:])
weight,cost,edgecosts=chainscore(tmp, rlength, qlength, ctg2range, rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,alfa=alfa,gapopen=gapopen)
tmpchainscore=weight-cost
if tmpchainscore<maxchainscore:
stack.append(orgchain[i]) #keep it
else:
logging.debug("Dropped block %s, gain: %d"%(orgchain[i],tmpchainscore-maxchainscore))
maxchainscore=tmpchainscore
maxchain=tmp
maxchain_cost=cost
maxchain_weight=weight
maxchain_edgecosts=edgecosts
logging.debug("Optimal chain has %d blocks and scores: %d"%(len(maxchain),maxchainscore))
return maxchain,maxchain_weight,maxchain_cost,maxchain_edgecosts
def chainscore(chain, rlength, qlength, ctg2range, rearrangecost=1000, inversioncost=1, _lambda=5, eps=1, alfa=1, gapopen=10):
# logging.debug("rearrangecost=%d, inversioncost=%d, _lambda=%d, eps=%d, alfa=%d, gapopen=%d"%(rearrangecost, inversioncost, _lambda, eps, alfa, gapopen))
if len(chain)==0:
start=(0,0,rlength,rlength,0,0,0,0)
end=(rlength,rlength,rlength+qlength,rlength+qlength,0,0,0,0)
cost=gapcost(start,end,rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
return 0,cost,[cost]
chain.sort(key=lambda s: s[0]) #order by reference position
qryorder = sorted(xrange(len(chain)), key= lambda i: chain[i][2]) #qry order
qryorder_inv = sorted(xrange(len(chain)), key=qryorder.__getitem__) #inverse qry order
lastqstart,lastqend=ctg2range[chain[-1][7]]
if chain[0][4]==0:
end=(rlength,rlength,lastqend,lastqend,chain[0][4])
else:
end=(rlength,rlength,lastqstart,lastqstart,chain[0][4])
firstqstart,firstqend=ctg2range[chain[0][7]]
if chain[0][4]==0:
start=(0,0,firstqstart,firstqstart,chain[0][4])
else:
start=(0,0,firstqend,firstqend,chain[0][4])
#count out of order traversals
rearrangements=0
inversions=0
startcost=gapcost(start,chain[0],rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
cost=startcost
edgecosts=[startcost]
weight=alfa*chain[0][5]
for ri in xrange(1,len(chain)):
pblock=chain[ri-1]
block=chain[ri]
ps1,pe1,ps2,pe2,po,pscore,pref,pctg=pblock
s1,e1,s2,e2,o,score,ref2,ctg=block
weight+=(alfa*score)
# xgap=0#s1-pe1
pqi=qryorder_inv[ri-1] #index within the qryorder of pblock
qi=qryorder_inv[ri] #index within the qryorder of block
if pctg==ctg and pref==ref2:
if (pqi==qi-1) or (pqi==qi+1): #check if the two blocks are colinear
gc=gapcost(pblock,block,rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
cost+=gc
edgecosts.append(gc)
else: #all other options use rearrangement penalty
rearrangements+=1
cost+=(gapopen+rearrangecost)
edgecosts.append(gapopen+rearrangecost)
else: #cross contigs
if o==0:
if qi>0:
pqs1,pqe1,pqs2,pqe2,pqo,pqscore,pq_ref,pq_ctg=chain[qryorder[qi-1]]
else:
pq_ctg='start'
else:
if qi<len(qryorder)-1:
pqs1,pqe1,pqs2,pqe2,pqo,pqscore,pq_ref,pq_ctg=chain[qryorder[qi+1]]
else:
pq_ctg='end'
if po==0:
if pqi<len(qryorder)-1:
nqs1,nqe1,nqs2,nqe2,nqo,nqscore,nq_ref,nq_ctg=chain[qryorder[pqi+1]]
else:
nq_ctg='end'
else:
if pqi>0:
nqs1,nqe1,nqs2,nqe2,nqo,nqscore,nq_ref,nq_ctg=chain[qryorder[pqi-1]]
else:
nq_ctg='start'
if pq_ctg==ctg or nq_ctg==pctg: #there exists another block on this query contig before changing contigs, so has to be rearranged
rearrangements+=1
cost+=(gapopen+rearrangecost)
edgecosts.append((gapopen+rearrangecost))
else:
edgecosts.append(gapopen) #simple traversal between two contigs
endcost=gapcost(chain[-1],end,rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
cost+=endcost
edgecosts.append(endcost)
return weight,cost,edgecosts
def update_progress(i,n):
fullbar=100
if (i+1) % (n/fullbar if n>fullbar else 1)==0 or i+1==n:
done=int(fullbar*((i+1)/float(n)))
todo=fullbar-done
sys.stdout.write('\r[%s%s]'%("#"*done," "*todo))
if i+1==n:
sys.stdout.write('\n')
sys.stdout.flush()
def glocalchain(syntenyblocks, rlength, qlength, ctg2range, rearrangecost=1000, inversioncost=1, lastn=50, lastbp=10000, useheap=False, axis=0, _lambda=5, eps=1, alfa=1, gapopen=10):
sep=rlength
#add some dummy blocks for the contig start/ends
if axis==0:
for refid,(refstart,refend) in enumerate(ctg2range):
if refstart>=sep:
break
if refid==0:
start=(refstart,refstart,None,None,0,0,None,None)
syntenyblocks.append((refend,refend,None,None,0,0,None,None))
end=syntenyblocks[-1]
if axis==1:
first=True
for ctgid,(ctgstart,ctgend) in enumerate(ctg2range):
if ctgstart<sep:
continue
if first:
start=(None,None,ctgstart,ctgstart,0,0,None,None)
first=False
syntenyblocks.append((None,None,ctgend,ctgend,0,0,None,None))
end=syntenyblocks[-1]
if axis==0: #sort by ref
c1,c2=0,2
else: #sort by qry
c1,c2=2,0
syntenyblocks.sort(key=lambda s: (s[c1],-s[5]) ) #order by reference position, then score
if useheap:
heap=sortedcontainers.SortedList()
heap.add((0,start))
else:
heap=[(0,start)]+[None]*(len(syntenyblocks))
G={b:None for b in syntenyblocks}
maxscore=None
n=len(syntenyblocks)
bt=range(n+1)
update_progress(0,n)
pri=0
t0=time.time()
deepest=0
# best=None
for ri in xrange(n):
block=syntenyblocks[ri]
while syntenyblocks[deepest][c1+1]<block[c1]:
deepest+=1
if ri%1000==0:
t1=time.time()
sec=t1-t0
bd=ri-pri
logging.debug("Blocks per sec: %d"%(bd/sec))
t0=t1
pri=ri
update_progress(ri,n)
s1,e1,s2,e2,o,score,refid,ctgid=block
trace=False
# starttrace=105637436
# endtrace=starttrace+10
# if s1>=starttrace and s1<endtrace: # and refid==ctgtrace:
# # # if block==(4499237, 4502780, 9008394, 9011937, 0, 3543, 0, 1) or block==end:
# logging.info("BLOCK: %s"%str(block))
# print "deepest",syntenyblocks[deepest], syntenyblocks[deepest][c1+1]
# trace=True
bestscore=None
bestblock=None
bestcost=0
# checkedbest=False
l=0
for j in bt: #back track on the heap
if useheap:
if j>=len(heap):
break
cscore,pblock=heap[-j]
else:
i=(ri+1)-j-1
if i<0:
break
cscore,pblock=heap[i]
# if best==None or cscore==best:
# checkedbest=True
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=pblock
if (pblock[c1]==block[c1] and prefid!=None and refid!=None) or (pblock[c1+1]>=block[c1+1] and prefid!=None and refid!=None):
continue
if (pblock[c2]>=block[c2] and prefid!=None and refid!=None) and (pblock[c2+1]<=block[c2+1] and prefid!=None and refid!=None):
continue
l+=1
if bestscore!=None:
if cscore<=bestscore:
if useheap:
break
else:
if block[c1]-pblock[c1]>lastbp and l>=lastn and pblock[c1]<syntenyblocks[deepest][c1]:
break
else:
continue
#if block is a dummy block, make it relative to pblock, if possible
if block[6]==None and pblock[6]!=None: #update current block to be relative to pblock
if axis==0:
_block=(s1, e1, pe2 if po==0 else ps2, pe2 if po==0 else ps2, po, 0, prefid, pctgid)
else:
_block=(pe1 if po==0 else ps1, pe1 if po==0 else ps1, s2, e2, po, 0, prefid, pctgid)
else:
_block=block
#if pblock is a dummy block, make it relative to block, if possible
if pblock[6]==None and block[6]!=None:
if axis==0:
_pblock=(ps1, pe1, s2 if o==0 else e2, s2 if o==0 else e2, o, 0, refid, ctgid)
else:
_pblock=(s1 if o==0 else e1, s1 if o==0 else e1, ps2, pe2, o, 0, refid, ctgid)
else:
_pblock=pblock
#if blocks come from same query contig and reference contig, compute gapcost, else introduce rearrangement cost
if _pblock[6]==_block[6]!=None and _pblock[7]==_block[7]!=None:
c=gapcost(_pblock,_block,rearrangecost=rearrangecost,inversioncost=inversioncost,eps=eps,_lambda=_lambda,gapopen=gapopen,axis=axis)
elif _pblock[6]==_block[6]==None and _pblock[7]==_block[7]==None: #connect two dummy blocks
c=gapopen+(abs(block[c1]-(pblock[c1+1]))*eps)
else: #blocks cross contigs or ref without passing a dummy block, introduce rearrangement cost
pblockctgstart,pblockctgend=ctg2range[_pblock[7]]
blockctgstart,blockctgend=ctg2range[_block[7]]
pblockrefstart,pblockrefend=ctg2range[_pblock[6]]
blockrefstart,blockrefend=ctg2range[_block[6]]
if _pblock[6]==_block[6] and axis==0:
if _pblock[4]==0:
cp=abs( pblockctgend-_pblock[3])
else:
cp=abs( _pblock[2]-pblockctgstart)
if _block[4]==0:
cb=abs( blockctgend-_block[3] )
else:
cb=abs( _block[2]-blockctgstart )
c=gapopen+min((rearrangecost,((cp+cb)*eps)))
elif _pblock[7]==_block[7] and axis==1:
if _pblock[4]==0:
cp=abs( pblockrefend-_pblock[1])
else:
cp=abs( _pblock[0]-pblockrefstart)
if _block[4]==0:
cb=abs( _block[0]-blockrefstart )
else:
cb=abs( blockrefend-_block[1])
c=gapopen+min((rearrangecost,((cp+cb)*eps)))
else:
c=rearrangecost+gapopen+(abs(block[c1]-(pblock[c1+1]))*eps)
assert(c>=0)
if trace:
logging.info("Connect to PBLOCK: %s costs %s, depth=%s, lastbp=%d, cscore,%s, cscore-c=%d, bestscore=%s"%(pblock,c,l,block[c1]-pblock[c1],cscore,cscore-c,bestscore))
if bestscore==None or cscore-c > bestscore:
bestscore=cscore-c
bestblock=pblock
bestcost=c
if not useheap:
if block[c1]-pblock[c1]>lastbp and l>=lastn and pblock[c1]<syntenyblocks[deepest][c1]:
break
# if l>lastn:
# logging.info("Forced deeper %d backtrack for block: %s"%(l,block))
cscore=bestscore+(alfa*score)
# if best==None or cscore>best:
# best=cscore
if useheap:
heap.add((cscore,block))
else:
heap[ri+1]=(cscore,block)
if maxscore==None or maxscore<cscore:
maxscore=cscore
maxnode=block
if trace:
logging.info("CONNECT TO BLOCK: %s, score=%s, cost=%s, depth=%s"%(bestblock,bestscore,bestcost,l))
G[block]=(bestblock,bestscore)
node,cscore=G[end]
chain=[]
while node!=start:# and node!=startrc:
if node[6]!=None: #only add to the chain if it is an actual anchor, exclude contig endpoints
chain.append(node)
s1,e1,s2,e2,o,score,refid,ctgid=node
nnode,score=G[node]
if node==nnode:
logging.fatal("Loop in chain!")
sys.exit(1)
node=nnode
logging.info("Optimal glocal chain contains: %d anchors and scores %d"%(len(chain),cscore))
return chain[::-1]
def gapcost(block1,block2,rearrangecost=10000,inversioncost=0,eps=0,_lambda=0.5,gapopen=10,axis=0):
if axis==0: #sorted by ref
c1,c2=0,2
else: #sorted by qry
c1,c2=2,0
assert(block1[c1]<=block2[c1])
d1=block2[c1]-block1[c1+1]
if block1[4]==block2[4]==0: #both normal orientation
if block2[c2]<block1[c2]:#always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*(d1 if d1>0 else 0) #do not penalize if overlap
return gapopen+indelcost+substitutioncost
else:
d2=block2[c2]-block1[c2+1]
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost
elif block1[4]==block2[4]==1: #both reverse comp orientation
if block2[c2]>block1[c2]: #always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*(d1 if d1>0 else 0)
return gapopen+indelcost+substitutioncost
else:
d2=block1[c2]-block2[c2+1]
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost
elif block1[4]==1 and block2[4]==0:
d1=max((0,d1))
if block2[c2]>block1[c2]:
d2=block2[c2]-block1[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=block1[c2]-block2[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
else:
# assert(block1[4]==0 and block2[4]==1)
d1=max((0,d1))
if block2[c2]>block1[c2]:
d2=block2[c2]-block1[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=block1[c2]-block2[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
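# Illustrative sketch (not part of the original module): gap cost between two
# co-linear, forward-oriented anchors on the same contigs. Coordinates are made
# up; with a 50bp reference gap and a 40bp query gap the cost works out to
# gapopen + _lambda*|50-40| + eps*min(50,40) = 10 + 50 + 40 = 100.
def _example_gapcost():
    block1 = (100, 200, 1100, 1200, 0, 100, 0, 1)
    block2 = (250, 350, 1240, 1340, 0, 100, 0, 1)
    return gapcost(block1, block2, _lambda=5, eps=1, gapopen=10)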
def _gapcost(block1,block2,rearrangecost=10000,inversioncost=0,eps=0,_lambda=0.5,gapopen=10,axis=0):
if axis==0: #sorted by ref
c1,c2=0,2
else: #sorted by qry
c1,c2=2,0
assert(block1[c1]<=block2[c1])
d1=abs(block2[c1]-block1[c1+1])
if block1[4]==block2[4]==0: #both normal orientation
if block2[c2]<block1[c2]:#always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*d1
return gapopen+indelcost+substitutioncost
else:
d2=abs(block2[c2]-block1[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost
elif block1[4]==block2[4]==1: #both reverse comp orientation
if block2[c2]>block1[c2]: #always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*d1
return gapopen+indelcost+substitutioncost
else:
d2=abs(block1[c2]-block2[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost
elif block1[4]==1 and block2[4]==0:
if block2[c2]>block1[c2]:
d2=abs(block2[c2]-block1[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=abs(block1[c2]-block2[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
else:
# assert(block1[4]==0 and block2[4]==1)
if block2[c2]>block1[c2]:
d2=abs(block2[c2]-block1[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=abs(block1[c2]-block2[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
def printSA(index,maxline=100,start=0,end=None,fn="sa.txt"):
sa=index.SA
lcp=index.LCP
t=index.T
#so=index.SO
if end==None:
end=len(sa)
# with open(fn,'w') as f:
sys.stdout.write("%d\t%d\n"%(len(sa), len(lcp)))
assert(len(sa)==len(lcp))
for i in xrange(len(sa)):
s=sa[i]
lcpi=lcp[i]
if i>0 and i<len(sa)-1:
l1=lcp[i]
l2=lcp[i+1]
elif i==len(sa)-1:
l1=max([lcp[i-1],lcp[i]])
l2=0
else:
l1=0
l2=lcp[i+1]
if i>=start and i<=end:
#f.write("%s\t%s\t%s\n"%(str(s).zfill(8), str(lcpi).zfill(6), t[s:s+maxline].ljust(maxline) if l1<=maxline else t[s:s+maxline]+"..."+t[s+l1-40:s+l1].ljust(maxline) ) )
sys.stdout.write("%s\t%s\t%s\t%s\t%s\n"%(str(s).zfill(8), str(lcpi).zfill(6), t[s:s+maxline] ,t[s+l1-maxline:s+l1], t[s+l2-maxline:s+l2] ) )
def remove_overlap_conservative_blocks(anchors):
for coord in [0,2]:
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[coord], (m[coord+1]-m[coord])*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[coord] < last[coord+1]: #overlap
if anchor[coord+1]<=last[coord+1]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
for anchor in anchors[1:]:
s1,e1,s2,e2,o,score,refid,ctgid=anchor
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
overlap=(_anchors[-1][coord+1]) - anchor[coord]
pl=pe1-ps1
if overlap > 0: #overlap
if score<=overlap:
continue
assert(score-overlap >= 0)
if o==0:
anchor=(s1+overlap,e1,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
if coord==0:
anchor=(s1+overlap,e1,s2,e2-overlap,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
anchor=(s1,e1-overlap,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
assert(anchor[coord+1]>_anchors[-1][coord+1])
while pl<=overlap or pscore<=overlap:
_anchors.pop()
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
overlap=(_anchors[-1][coord+1]) - anchor[coord]
if overlap<0:
break
pl=pe1-ps1
if overlap>0:
assert(pscore-overlap >= 0)
if po==0:
_anchors[-1]=(ps1,pe1-overlap,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
else:
if coord==0:
_anchors[-1]=(ps1,pe1-overlap, ps2+overlap,pe2, po,pscore-overlap if overlap<pscore else 0, prefid,pctgid)
else:
_anchors[-1]=(ps1+overlap,pe1,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0, prefid,pctgid)
_anchors.append(anchor)
anchors=_anchors
return anchors
def remove_overlap_greedy_blocks(anchors):
#TODO: remove duplicates!
for coord in [0,2]:
if len(anchors)<=1: #by definition no containment
return anchors
update_progress(0,len(anchors))
anchors.sort(key=lambda m: (m[coord], (m[coord+1]-m[coord])*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[coord] < last[coord+1]: #overlap
if anchor[coord+1]<=last[coord+1]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
# for anchor in anchors[1:]:
for i in xrange(1,len(anchors)):
anchor=anchors[i]
update_progress(i,len(anchors))
s1,e1,s2,e2,o,score,refid,ctgid=anchor
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
pl=pe1-ps1
overlap=(_anchors[-1][coord+1]) - anchor[coord]
if overlap > 0: #overlap
if pscore > score: #update current anchor
if score<=overlap:
continue
assert(score-overlap >= 0)
if o==0:
anchor=(s1+overlap,e1,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
if coord==0:
anchor=(s1+overlap,e1,s2,e2-overlap,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
anchor=(s1,e1-overlap,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
_anchors.append(anchor)
else:
while pl<=overlap or pscore<=overlap:
_anchors.pop()
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
overlap=(_anchors[-1][coord+1]) - anchor[coord]
if overlap<0:
break
pl=pe1-ps1
if overlap>0:
assert(pl>overlap)
assert(pscore>overlap)
assert(pscore-overlap >= 0)
if po==0:
_anchors[-1]=(ps1,pe1-overlap,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
else:
if coord==0:
_anchors[-1]=(ps1,pe1-overlap, ps2+overlap,pe2, po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
else:
_anchors[-1]=(ps1+overlap,pe1,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
_anchors.append(anchor)
else:
_anchors.append(anchor)
anchors=_anchors
return anchors
def remove_contained_blocks(anchors):
#remove duplicates!
for coord in [0,2]:
logging.info("Remove overlap in %s dimension."%("first" if coord==0 else "second"))
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[coord], (m[coord+1]-m[coord])*-1) ) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
update_progress(0,len(anchors))
# for anchor in anchors[1:]:
for i in xrange(1,len(anchors)):
anchor=anchors[i]
update_progress(i,len(anchors))
if anchor[coord] < last[coord+1]: #overlap
if anchor[coord+1]<=last[coord+1]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
return anchors
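# Illustrative behaviour only (anchor layout assumed to be (s1, e1, s2, e2, o, score, refid, ctgid)):
#   anchors = [(0, 100, 0, 100, 0, 100, 0, 0), (10, 50, 10, 50, 0, 40, 0, 0)]
#   remove_contained_blocks(anchors)  # the second anchor lies inside the first on both axes and is dropped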
#unused
def remove_overlap_greedy_mums(anchors):
#remove duplicates!
n=2
for coord in range(n):
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[1][coord], m[0]*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
if anchor[1][coord]+anchor[0]<=last[1][coord]+last[0]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
for anchor in anchors[1:]:
overlap=(_anchors[-1][1][coord]+_anchors[-1][0]) - anchor[1][coord]
if overlap > 0: #overlap
if _anchors[-1][0] > anchor[0]:
if anchor[2]==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]+overlap), anchor[2])
else:
if coord==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]), anchor[2])
else:
anchor=(anchor[0]-overlap, (anchor[1][0], anchor[1][1]+overlap), anchor[2])
_anchors.append(anchor)
else:
while _anchors[-1][0]<=overlap and overlap>0:
_anchors.pop()
overlap=(_anchors[-1][1][coord]+_anchors[-1][0]) - anchor[1][coord]
if overlap>0:
if _anchors[-1][2]==0:
_anchors[-1]=(_anchors[-1][0]-overlap,_anchors[-1][1],_anchors[-1][2]) #update stack
else:
if coord==0:
_anchors[-1]=(_anchors[-1][0]-overlap,_anchors[-1][1],_anchors[-1][2])
else:
_anchors[-1]=(_anchors[-1][0]-overlap,(_anchors[-1][1][0]+overlap, _anchors[-1][1][1]),_anchors[-1][2])
_anchors.append(anchor)
else:
_anchors.append(anchor)
anchors=_anchors
return anchors
#unused
def remove_contained_mums(anchors):
#remove duplicates!
for coord in range(2):
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[1][coord], m[0]*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
if anchor[1][coord]+anchor[0]<=last[1][coord]+last[0]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
return anchors
#unused
def remove_overlap_conservative_mums(anchors):
#remove duplicates!
n=2
for coord in range(n):
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[1][coord], m[0]*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
if anchor[1][coord]+anchor[0]<=last[1][coord]+last[0]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
assert(anchor[1][coord]+anchor[0] > last[1][coord]+last[0]) #may not be contained, as we filtered these out already
overlap=(last[1][coord]+last[0])-anchor[1][coord]
assert(overlap>=0)
assert(anchor[0]>overlap)
if anchor[2]==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]+overlap), anchor[2])
else:
if coord==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]), anchor[2])
else:
anchor=(anchor[0]-overlap, (anchor[1][0], anchor[1][1]+overlap), anchor[2])
# assert(last[0]>overlap)
if last[2]==0:
_anchors[-1]=(last[0]-overlap,last[1],last[2]) #update last
else:
if coord==0:
_anchors[-1]=(last[0]-overlap,(last[1][0], last[1][1]+overlap),last[2])
else:
_anchors[-1]=(last[0]-overlap,(last[1][0]+overlap, last[1][1]),last[2])
if _anchors[-1][0]<=0:
_anchors[-1]=anchor
else:
_anchors.append(anchor)
last=anchor
anchors=_anchors
return anchors
|
the-stack_0_11658 | # ------------------------------------------------------------------------------
# Training code.
# Example command:
# python -m torch.distributed.launch --nproc_per_node=4 tools/train_net.py --cfg PATH_TO_CONFIG_FILE
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import argparse
import os
import pprint
import logging
import time
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel
import tools._init_paths
from fvcore.common.file_io import PathManager
from segmentation.config import config, update_config
from segmentation.utils.logger import setup_logger
from segmentation.model import build_segmentation_model_from_cfg
from segmentation.utils import comm
from segmentation.solver import build_optimizer, build_lr_scheduler
from segmentation.data import build_train_loader_from_cfg, build_test_loader_from_cfg
from segmentation.solver import get_lr_group_id
from segmentation.utils import save_debug_images
from segmentation.utils import AverageMeter
from segmentation.utils.utils import get_loss_info_str, to_cuda, get_module
def parse_args():
parser = argparse.ArgumentParser(description='Train segmentation network')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
update_config(config, args)
return args
def main():
args = parse_args()
logger = logging.getLogger('segmentation')
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called
setup_logger(output=config.OUTPUT_DIR, distributed_rank=args.local_rank)
# logger.info(pprint.pformat(args))
# logger.info(config)
# cudnn related setting
cudnn.benchmark = config.CUDNN.BENCHMARK
cudnn.deterministic = config.CUDNN.DETERMINISTIC
cudnn.enabled = config.CUDNN.ENABLED
gpus = list(config.GPUS)
distributed = len(gpus) > 1
device = torch.device('cuda:{}'.format(args.local_rank))
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://",
)
# build model
model = build_segmentation_model_from_cfg(config)
# logger.info("Model:\n{}".format(model))
logger.info("Rank of current process: {}. World size: {}".format(comm.get_rank(), comm.get_world_size()))
if distributed:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.to(device)
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank
)
data_loader = build_train_loader_from_cfg(config)
optimizer = build_optimizer(config, model)
lr_scheduler = build_lr_scheduler(config, optimizer)
data_loader_iter = iter(data_loader)
start_iter = 0
max_iter = config.TRAIN.MAX_ITER
best_param_group_id = get_lr_group_id(optimizer)
# initialize model
if os.path.isfile(config.MODEL.WEIGHTS):
model_weights = torch.load(config.MODEL.WEIGHTS)
get_module(model, distributed).load_state_dict(model_weights, strict=False)
logger.info('Pre-trained model from {}'.format(config.MODEL.WEIGHTS))
elif config.MODEL.BACKBONE.PRETRAINED:
if os.path.isfile(config.MODEL.BACKBONE.WEIGHTS):
pretrained_weights = torch.load(config.MODEL.BACKBONE.WEIGHTS)
get_module(model, distributed).backbone.load_state_dict(pretrained_weights, strict=False)
logger.info('Pre-trained backbone from {}'.format(config.MODEL.BACKBONE.WEIGHTS))
else:
logger.info('No pre-trained weights for backbone, training from scratch.')
# load model
if config.TRAIN.RESUME:
model_state_file = os.path.join(config.OUTPUT_DIR, 'checkpoint.pth.tar')
if os.path.isfile(model_state_file):
checkpoint = torch.load(model_state_file)
start_iter = checkpoint['start_iter']
get_module(model, distributed).load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
logger.info('Loaded checkpoint (starting from iter {})'.format(checkpoint['start_iter']))
data_time = AverageMeter()
batch_time = AverageMeter()
loss_meter = AverageMeter()
    # Report the number of model parameters
    def get_parameter_number(net):
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
        # return {'Total': total_num/1000000, 'Trainable': trainable_num/1000000}
        logger.info('Total:{}M, Trainable:{}M'.format(total_num/1000000, trainable_num/1000000))
    get_parameter_number(model)
# Debug output.
if config.DEBUG.DEBUG:
debug_out_dir = os.path.join(config.OUTPUT_DIR, 'debug_train')
PathManager.mkdirs(debug_out_dir)
# Train loop.
try:
for i in range(start_iter, max_iter):
# data
start_time = time.time()
data = next(data_loader_iter)
if not distributed:
data = to_cuda(data, device)
data_time.update(time.time() - start_time)
            # Unpack the mini-batch data and labels
image = data.pop('image')
label = data.pop('label')
# import imageio
# import numpy as np
# print(label.shape)
# label_image = np.array(label.cpu()[0])
# print(label_image.shape)
# imageio.imwrite('%s/%d_%s.png' % ('./', 1, 'debug_batch_label'), label_image.transpose(1, 2, 0))
            # Forward pass
out_dict = model(image, data)
            # Compute the loss
loss = out_dict['loss']
            # Zero the gradients before the backward pass
optimizer.zero_grad()
            # Backward pass
loss.backward()
            # Update the trainable parameters
optimizer.step()
# Get lr.
lr = optimizer.param_groups[best_param_group_id]["lr"]
lr_scheduler.step()
batch_time.update(time.time() - start_time)
loss_meter.update(loss.detach().cpu().item(), image.size(0))
if i == 0 or (i + 1) % config.PRINT_FREQ == 0:
msg = '[{0}/{1}] LR: {2:.7f}\t' \
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Data: {data_time.val:.3f}s ({data_time.avg:.3f}s)\t'.format(
i + 1, max_iter, lr, batch_time=batch_time, data_time=data_time)
msg += get_loss_info_str(get_module(model, distributed).loss_meter_dict)
logger.info(msg)
if i == 0 or (i + 1) % config.DEBUG.DEBUG_FREQ == 0:
if comm.is_main_process() and config.DEBUG.DEBUG:
save_debug_images(
dataset=data_loader.dataset,
label=label,
batch_images=image,
batch_targets=data,
batch_outputs=out_dict,
out_dir=debug_out_dir,
iteration=i,
target_keys=config.DEBUG.TARGET_KEYS,
output_keys=config.DEBUG.OUTPUT_KEYS,
iteration_to_remove=i - config.DEBUG.KEEP_INTERVAL
)
if i == 0 or (i + 1) % config.CKPT_FREQ == 0:
if comm.is_main_process():
torch.save({
'start_iter': i + 1,
'state_dict': get_module(model, distributed).state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
}, os.path.join(config.OUTPUT_DIR, 'checkpoint.pth.tar'))
except Exception:
logger.exception("Exception during training:")
raise
finally:
if comm.is_main_process():
torch.save(get_module(model, distributed).state_dict(),
os.path.join(config.OUTPUT_DIR, 'final_state.pth'))
logger.info("Training finished.")
if __name__ == '__main__':
main()
|
the-stack_0_11659 | # 利用鸢尾花数据集,实现前向传播、反向传播,可视化loss曲线
# 导入所需模块
import tensorflow as tf
from sklearn import datasets
from matplotlib import pyplot as plt
import numpy as np
import time ##1##
# Load the data: input features and labels
x_data = datasets.load_iris().data
y_data = datasets.load_iris().target
# Shuffle the data (the raw data is ordered; leaving it unshuffled would hurt accuracy)
# seed: random-number seed (an integer); once set, the same random numbers are generated on every run (kept here so results are reproducible)
np.random.seed(116)  # use the same seed so input features and labels stay paired after shuffling
np.random.shuffle(x_data)
np.random.seed(116)
np.random.shuffle(y_data)
tf.random.set_seed(116)
# Split the shuffled data: the first 120 rows become the training set, the last 30 rows the test set
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]
# Cast x to float32, otherwise the matrix multiplication below raises a dtype-mismatch error
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)
# from_tensor_slices pairs input features with labels one-to-one (the dataset is then grouped into batches)
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
# Create the network parameters: 4 input features, so the input layer has 4 nodes; 3 classes, so the output layer has 3 neurons
# Mark the parameters as trainable with tf.Variable()
# A fixed seed makes every run generate the same random numbers (for teaching consistency; omit the seed in real use)
w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, seed=1))
b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1))
lr = 0.1  # learning rate
train_loss_results = []  # record each epoch's loss here, used later to plot the loss curve
test_acc = []  # record each epoch's accuracy here, used later to plot the accuracy curve
epoch = 500  # train for 500 epochs
loss_all = 0  # each epoch has 4 steps; loss_all accumulates the 4 per-step losses
##########################################################################
m_w, m_b = 0, 0
beta = 0.9
##########################################################################
# Training
now_time = time.time() ##2##
for epoch in range(epoch):  # dataset-level loop: one pass over the dataset per epoch
    for step, (x_train, y_train) in enumerate(train_db):  # batch-level loop: one batch per step
        with tf.GradientTape() as tape:  # record operations for automatic differentiation
            y = tf.matmul(x_train, w1) + b1  # forward pass: matrix multiply plus bias
            y = tf.nn.softmax(y)  # turn the output y into a probability distribution (same scale as the one-hot labels, so they can be subtracted for the loss)
            y_ = tf.one_hot(y_train, depth=3)  # convert labels to one-hot encoding to compute loss and accuracy
            loss = tf.reduce_mean(tf.square(y_ - y))  # mean squared error loss: mse = mean(sum(y-out)^2)
            loss_all += loss.numpy()  # accumulate each step's loss so the epoch-average loss reported later is more accurate
        # Compute the gradients of the loss with respect to each parameter
grads = tape.gradient(loss, [w1, b1])
##########################################################################
        # SGD with momentum
m_w = beta * m_w + (1 - beta) * grads[0]
m_b = beta * m_b + (1 - beta) * grads[1]
w1.assign_sub(lr * m_w)
b1.assign_sub(lr * m_b)
##########################################################################
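        # The update above follows the momentum rule (comment added for clarity, not original code):
        #   m_t = beta * m_{t-1} + (1 - beta) * g_t
        #   w_t = w_{t-1} - lr * m_t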
    # Print the loss information once per epoch
    print("Epoch {}, loss: {}".format(epoch, loss_all / 4))
    train_loss_results.append(loss_all / 4)  # record the average of the 4 per-step losses
    loss_all = 0  # reset loss_all to accumulate the next epoch's loss
    # Evaluation
    # total_correct counts correctly predicted samples, total_number counts all test samples; both start at 0
total_correct, total_number = 0, 0
for x_test, y_test in test_db:
        # Predict with the updated parameters
        y = tf.matmul(x_test, w1) + b1
        y = tf.nn.softmax(y)
        pred = tf.argmax(y, axis=1)  # index of the largest value of y, i.e. the predicted class
        # Cast pred to the same dtype as y_test
        pred = tf.cast(pred, dtype=y_test.dtype)
        # correct is 1 if the prediction is right, 0 otherwise; cast the bool result to int
        correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32)
        # Sum the correct predictions within this batch
        correct = tf.reduce_sum(correct)
        # Accumulate the correct predictions over all batches
        total_correct += int(correct)
        # total_number is the total number of test samples, i.e. the number of rows of x_test (shape[0])
        total_number += x_test.shape[0]
    # Overall accuracy = total_correct / total_number
acc = total_correct / total_number
test_acc.append(acc)
print("Test_acc:", acc)
print("--------------------------")
total_time = time.time() - now_time ##3##
print("total_time", total_time) ##4##
# Plot the loss curve
plt.title('Loss Function Curve')  # figure title
plt.xlabel('Epoch')  # x-axis label
plt.ylabel('Loss')  # y-axis label
plt.plot(train_loss_results, label="$Loss$")  # plot train_loss_results point by point and connect them; legend label is Loss
plt.legend()  # draw the legend
plt.show()  # show the figure
# Plot the accuracy curve
plt.title('Acc Curve')  # figure title
plt.xlabel('Epoch')  # x-axis label
plt.ylabel('Acc')  # y-axis label
plt.plot(test_acc, label="$Accuracy$")  # plot test_acc point by point and connect them; legend label is Accuracy
plt.legend()
plt.show()
# Record the loss curve, the accuracy curve and total_time in class2\优化器对比.docx to compare how the different optimizers converge
|
the-stack_0_11660 | import cgi
import re
import urllib.parse
import warnings
from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
import requests
import requests.auth
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
from poetry.core.packages.package import Package
from poetry.core.packages.utils.link import Link
from poetry.core.semver.helpers import parse_constraint
from poetry.core.semver.version import Version
from poetry.core.semver.version_constraint import VersionConstraint
from poetry.core.semver.version_range import VersionRange
from poetry.locations import REPOSITORY_CACHE_DIR
from poetry.utils.helpers import canonicalize_name
from poetry.utils.patterns import wheel_file_re
from ..config.config import Config
from ..inspection.info import PackageInfo
from ..installation.authenticator import Authenticator
from .exceptions import PackageNotFound
from .exceptions import RepositoryError
from .pypi_repository import PyPiRepository
if TYPE_CHECKING:
from poetry.core.packages.dependency import Dependency
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import html5lib
class Page:
VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
SUPPORTED_FORMATS = [
".tar.gz",
".whl",
".zip",
".tar.bz2",
".tar.xz",
".tar.Z",
".tar",
]
def __init__(self, url: str, content: str, headers: Dict[str, Any]) -> None:
if not url.endswith("/"):
url += "/"
self._url = url
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params["charset"]
self._content = content
if encoding is None:
self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
else:
self._parsed = html5lib.parse(
content, transport_encoding=encoding, namespaceHTMLElements=False
)
@property
def versions(self) -> Iterator[Version]:
seen = set()
for link in self.links:
version = self.link_version(link)
if not version:
continue
if version in seen:
continue
seen.add(version)
yield version
@property
def links(self) -> Iterator[Link]:
for anchor in self._parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urllib.parse.urljoin(self._url, href))
pyrequire = anchor.get("data-requires-python")
pyrequire = unescape(pyrequire) if pyrequire else None
link = Link(url, self, requires_python=pyrequire)
if link.ext not in self.SUPPORTED_FORMATS:
continue
yield link
def links_for_version(self, version: Version) -> Iterator[Link]:
for link in self.links:
if self.link_version(link) == version:
yield link
def link_version(self, link: Link) -> Optional[Version]:
m = wheel_file_re.match(link.filename)
if m:
version = m.group("ver")
else:
info, ext = link.splitext()
match = self.VERSION_REGEX.match(info)
if not match:
return
version = match.group(2)
try:
version = Version.parse(version)
except ValueError:
return
return version
_clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
def clean_link(self, url: str) -> str:
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
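        # Illustrative behaviour (hypothetical URL): clean_link("https://host/simple/pkg name/")
        # would return "https://host/simple/pkg%20name/", leaving already-encoded characters untouched.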
class LegacyRepository(PyPiRepository):
def __init__(
self,
name: str,
url: str,
config: Optional[Config] = None,
disable_cache: bool = False,
cert: Optional[Path] = None,
client_cert: Optional[Path] = None,
) -> None:
if name == "pypi":
raise ValueError("The name [pypi] is reserved for repositories")
self._packages = []
self._name = name
self._url = url.rstrip("/")
self._client_cert = client_cert
self._cert = cert
self._cache_dir = REPOSITORY_CACHE_DIR / name
self._cache = CacheManager(
{
"default": "releases",
"serializer": "json",
"stores": {
"releases": {"driver": "file", "path": str(self._cache_dir)},
"packages": {"driver": "dict"},
"matches": {"driver": "dict"},
},
}
)
self._authenticator = Authenticator(
config=config or Config(use_environment=True)
)
self._session = CacheControl(
self._authenticator.session, cache=FileCache(str(self._cache_dir / "_http"))
)
username, password = self._authenticator.get_credentials_for_url(self._url)
if username is not None and password is not None:
self._authenticator.session.auth = requests.auth.HTTPBasicAuth(
username, password
)
if self._cert:
self._authenticator.session.verify = str(self._cert)
if self._client_cert:
self._authenticator.session.cert = str(self._client_cert)
self._disable_cache = disable_cache
@property
def cert(self) -> Optional[Path]:
return self._cert
@property
def client_cert(self) -> Optional[Path]:
return self._client_cert
@property
def authenticated_url(self) -> str:
if not self._session.auth:
return self.url
parsed = urllib.parse.urlparse(self.url)
return "{scheme}://{username}:{password}@{netloc}{path}".format(
scheme=parsed.scheme,
username=quote(self._session.auth.username, safe=""),
password=quote(self._session.auth.password, safe=""),
netloc=parsed.netloc,
path=parsed.path,
)
def find_packages(self, dependency: "Dependency") -> List[Package]:
packages = []
constraint = dependency.constraint
if constraint is None:
constraint = "*"
if not isinstance(constraint, VersionConstraint):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (
constraint.max is not None
and constraint.max.is_prerelease()
or constraint.min is not None
and constraint.min.is_prerelease()
):
allow_prereleases = True
key = dependency.name
if not constraint.is_any():
key = "{}:{}".format(key, str(constraint))
ignored_pre_release_versions = []
if self._cache.store("matches").has(key):
versions = self._cache.store("matches").get(key)
else:
page = self._get("/{}/".format(dependency.name.replace(".", "-")))
if page is None:
return []
versions = []
for version in page.versions:
if version.is_prerelease() and not allow_prereleases:
if constraint.is_any():
# we need this when all versions of the package are pre-releases
ignored_pre_release_versions.append(version)
continue
if constraint.allows(version):
versions.append(version)
self._cache.store("matches").put(key, versions, 5)
for package_versions in (versions, ignored_pre_release_versions):
for version in package_versions:
package = Package(
dependency.name,
version,
source_type="legacy",
source_reference=self.name,
source_url=self._url,
)
packages.append(package)
self._log(
"{} packages found for {} {}".format(
len(packages), dependency.name, str(constraint)
),
level="debug",
)
if packages or not constraint.is_any():
# we have matching packages, or constraint is not (*)
break
return packages
def package(
self, name: str, version: str, extras: Optional[List[str]] = None
) -> Package:
"""
Retrieve the release information.
This is a heavy task which takes time.
We have to download a package to get the dependencies.
We also need to download every file matching this release
to get the various hashes.
Note that this will be cached so the subsequent operations
should be much faster.
"""
try:
index = self._packages.index(Package(name, version, version))
return self._packages[index]
except ValueError:
package = super(LegacyRepository, self).package(name, version, extras)
package._source_type = "legacy"
package._source_url = self._url
package._source_reference = self.name
return package
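    # Illustrative usage sketch only (the repository name and URL below are assumptions, not real endpoints):
    #   repo = LegacyRepository("private", "https://pypi.example.org/simple")
    #   pkg = repo.package("requests", "2.25.1")   # heavy call: downloads a distribution to read its metadata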
def find_links_for_package(self, package: Package) -> List[Link]:
page = self._get("/{}/".format(package.name.replace(".", "-")))
if page is None:
return []
return list(page.links_for_version(package.version))
def _get_release_info(self, name: str, version: str) -> dict:
page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
if page is None:
raise PackageNotFound('No package named "{}"'.format(name))
data = PackageInfo(
name=name,
version=version,
summary="",
platform=None,
requires_dist=[],
requires_python=None,
files=[],
cache_version=str(self.CACHE_VERSION),
)
links = list(page.links_for_version(Version.parse(version)))
if not links:
raise PackageNotFound(
'No valid distribution links found for package: "{}" version: "{}"'.format(
name, version
)
)
urls = defaultdict(list)
files = []
for link in links:
if link.is_wheel:
urls["bdist_wheel"].append(link.url)
elif link.filename.endswith(
(".tar.gz", ".zip", ".bz2", ".xz", ".Z", ".tar")
):
urls["sdist"].append(link.url)
h = link.hash
if h:
h = link.hash_name + ":" + link.hash
files.append({"file": link.filename, "hash": h})
data.files = files
info = self._get_info_from_urls(urls)
data.summary = info.summary
data.requires_dist = info.requires_dist
data.requires_python = info.requires_python
return data.asdict()
def _get(self, endpoint: str) -> Optional[Page]:
url = self._url + endpoint
try:
response = self.session.get(url)
if response.status_code == 404:
return
response.raise_for_status()
except requests.HTTPError as e:
raise RepositoryError(e)
if response.status_code in (401, 403):
self._log(
"Authorization error accessing {url}".format(url=response.url),
level="warn",
)
return
if response.url != url:
self._log(
"Response URL {response_url} differs from request URL {url}".format(
response_url=response.url, url=url
),
level="debug",
)
return Page(response.url, response.content, response.headers)
|
the-stack_0_11661 | '''
Client.Processor.* tests.
'''
from tests.integration.util import (
create_client
)
import pytest
import plaid
import json
from plaid.model.processor_token_create_request import ProcessorTokenCreateRequest
from plaid.model.processor_stripe_bank_account_token_create_request import ProcessorStripeBankAccountTokenCreateRequest
def test_stripe_processor_token():
client = create_client()
# Just test the failure case - behavior here depends on the API keys used
with pytest.raises(plaid.ApiException) as e:
request = ProcessorStripeBankAccountTokenCreateRequest(
access_token='fakeAccessToken',
account_id='fakeAccountId',
)
client.processor_stripe_bank_account_token_create(request)
response = json.loads(e.body)
assert response['error_code'] == 'INVALID_INPUT'
def test_dwolla_processor_token():
client = create_client()
# Just test the failure case - behavior here depends on the API keys used
with pytest.raises(plaid.ApiException) as e:
request = ProcessorTokenCreateRequest(
access_token='fakeAccessToken',
account_id='fakeAccountId',
processor='dwolla'
)
client.processor_token_create(request)
response = json.loads(e.body)
assert response['error_code'] == 'INVALID_INPUT'
|
the-stack_0_11664 | import pathlib
from setuptools import setup, find_packages
BASE_DIR = pathlib.Path(__file__).parent
PACKAGE_NAME = 'nlp_api'
VERSION = '0.0.01'
AUTHOR = 'Aivin V. Solatorio'
URL = 'https://github.com/avsolatorio/wb_nlp/app/nlp_api'
LICENSE = 'MIT'
DESCRIPTION = 'Python API'
INSTALL_REQUIRES = ['fastapi']
# Setting up
setup(
name=PACKAGE_NAME,
version=VERSION,
author=AUTHOR,
url=URL,
description=DESCRIPTION,
install_requires=INSTALL_REQUIRES,
packages=find_packages(include=['wb_nlp'])
)
|
the-stack_0_11666 | """
This is a sample simulation that does not represent any particular biological system. It is just a showcase
of how create a Simulation object, add forces, and initialize the reporter.
In this simulation, a simple polymer chain of 10,000 monomers is
"""
import time
import numpy as np
import os, sys
import polychrom
from polychrom import simulation, starting_conformations, forces, forcekits
from polychrom.integrators import ActiveBrownianIntegrator, CorrelatedNoiseIntegrator
import openmm
from polychrom.hdf5_format import HDF5Reporter
from simtk import unit
from pathlib import Path
total_runs = 2500
runs_per_gpu = total_runs // 2
def run_sim(i, gpuid=None, timestep=170, ntimesteps=10000, blocksize=100):
""" Run a single simulation on GPU i."""
N=100
density = 0.224
r = (3 * N / (4 * 3.141592 * density)) ** (1/3)
print(f"Radius of confinement: {r}")
D = np.ones((N, 3))
rhos = 0.5*np.ones((1, N))
rhos[0, 0:20] = -0.5
rhos[0, 20:40] = 0.0
rhos[0, 60:80] = 0.0
timestep = timestep
collision_rate = 2.0
friction = collision_rate * (1.0/unit.picosecond)
conlen = 1.0 * unit.nanometer
mass = 100 * unit.amu
temperature = 300
kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
kT = kB * temperature * unit.kelvin
particleD = unit.Quantity(D, kT/(friction * mass))
integrator = CorrelatedNoiseIntegrator(timestep, collision_rate, particleD, rhos)
if gpuid is None:
gpuid = f"{i % 4}"
traj = f"/net/dau/home/dkannan/simulations/corr_sameT/ensemble10000_100/run{i}"
Path(traj).mkdir(parents=True, exist_ok=True)
reporter = HDF5Reporter(folder=traj, max_data_length=100, overwrite=True)
sim = simulation.Simulation(
platform="CUDA",
integrator=integrator,
timestep=timestep,
temperature=temperature,
GPU=gpuid,
collision_rate=collision_rate,
N=N,
save_decimals=2,
PBCbox=False,
reporters=[reporter],
)
polymer = starting_conformations.grow_cubic(N, 5)
sim.set_data(polymer, center=True) # loads a polymer, puts a center of mass at zero
sim.set_velocities(v=np.zeros((N,3)))
sim.add_force(forces.spherical_confinement(sim, density=density, k=5.0))
sim.add_force(
forcekits.polymer_chains(
sim,
chains=[(0, None, False)],
# By default the library assumes you have one polymer chain
# If you want to make it a ring, or more than one chain, use self.setChains
# self.setChains([(0,50,True),(50,None,False)]) will set a 50-monomer ring and a chain from monomer 50 to the end
bond_force_func=forces.harmonic_bonds,
bond_force_kwargs={
"bondLength": 1.0,
"bondWiggleDistance": 0.3, # Bond distance will fluctuate +- 0.05 on average
},
angle_force_func=None,
angle_force_kwargs={},
nonbonded_force_func=forces.polynomial_repulsive,
nonbonded_force_kwargs={
"trunc": 3.0, # this will let chains cross sometimes
#'trunc':10.0, # this will resolve chain crossings and will not let chain cross anymore
},
except_bonds=True,
)
)
tic = time.perf_counter()
for _ in range(ntimesteps): # Do 10 blocks
sim.do_block(blocksize) # Of 100 timesteps each. Data is saved automatically.
toc = time.perf_counter()
print(f'Ran simulation in {(toc - tic):0.4f}s')
sim.print_stats() # In the end, print very simple statistics
reporter.dump_data() # always need to run in the end to dump the block cache to the disk
if __name__ == '__main__':
#run 8 simulations, one on each gpu, for the same parameters
#run_sim(1)
for i in range(1, 1 + 2*runs_per_gpu, 2):
run_sim(i, gpuid="3")
|
the-stack_0_11667 | """
Command line tool to copy experiment metadata from one NeXus file to the other.
"""
import sys
import logging
import argparse
import freephil
from pathlib import Path
from . import (
version_parser,
full_copy_parser,
tristan_copy_parser,
)
from ..nxs_copy import CopyNexus, CopyTristanNexus
# Define a logger object and a formatter
logger = logging.getLogger("CopyNeXus")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s %(message)s")
# Phil scopes
general_scope = freephil.parse(
"""
input{
original_nexus = None
.type = path
.help = "NeXus file to be copied."
data_filename = None
.multiple = True
.type = path
.help = "HDF5 data file."
data_type = *images events
.type = choice
.help = "Type of data in the HDF5 file, can be either images or events."
simple_copy = False
.type = bool
.help = "If True, the full NeXus tree is copied."
skip = NXdata
.multiple = True
.optional = True
.type = str
.help = "NX_class object, or list of, to be skipped when copying metadata.
If called, it will always first skip NXdata."
}
"""
)
tristan_scope = freephil.parse(
"""
input {
tristan_nexus = None
.type = path
.help = "NeXus file associated with Tristan detector"
data_filename = None
.multiple = True
.type = path
.help = "HDF5 file with binned images"
experiment_type = stationary *rotation
.type = choice
.help = "Specify whether an experiment is stationary or a rotation scan. Defaults to rotation."
write_mode = r+ w *x a
.type = choice
.help = "Specify write mode for new NeXus file."
}
"""
)
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Copy metadata from input NeXus file.",
parents=[version_parser],
)
parser.add_argument("--debug", action="store_const", const=True)
parser.add_argument(
"-c",
"--show-config",
action="store_true",
default=False,
dest="show_config",
help="Show the configuration parameters.",
)
# CLIs
def copy_nexus(args):
clai = general_scope.command_line_argument_interpreter()
working_phil = general_scope.fetch(clai.process_and_fetch(args.phil_args))
params = working_phil.extract()
working_phil.show()
logger.info("Copy metadata from one NeXus file to another.")
# Path to data file and original nexus file
data_file = [Path(d).expanduser().resolve() for d in params.input.data_filename]
nexus_file = Path(params.input.original_nexus).expanduser().resolve()
logger.info(f"NeXus file to be copied: {nexus_file}")
logger.info(f"Input data to be saved in NeXus file: {data_file}")
logger.info(f"Data type: {params.input.data_type}")
if params.input.simple_copy is True:
logger.info(f"{nexus_file} will be copied in its entirety.")
else:
logger.info(
f"The following groups will not be copied from NXentry of {nexus_file}: {params.input.skip}"
)
try:
if params.input.data_type == "images":
new_nxs = CopyNexus.images_nexus(
data_file,
nexus_file,
simple_copy=params.input.simple_copy,
skip_group=params.input.skip,
)
elif params.input.data_type == "events":
new_nxs = CopyNexus.pseudo_events_nexus(
data_file,
nexus_file,
)
logger.info(f"File {nexus_file} correctly copied to {new_nxs}.")
except Exception as err:
logger.info(f"File {nexus_file} could not be copied.")
logger.exception(err)
def copy_tristan_nexus(args):
clai = tristan_scope.command_line_argument_interpreter()
working_phil = tristan_scope.fetch(clai.process_and_fetch(args.phil_args))
params = working_phil.extract()
working_phil.show()
logger.info("Copy metadata from Tristan NeXus file.")
# Path to data and original nexus file
data_file = [Path(d).expanduser().resolve() for d in params.input.data_filename]
nexus_file = Path(params.input.tristan_nexus).expanduser().resolve()
logger.info(f"Working directory: {data_file[0].parent}")
logger.info(f"NeXus file to be copied: {nexus_file}")
logger.info(f"Input data to be saved in NeXus file: {data_file}")
try:
if params.input.experiment_type == "stationary":
logger.info(
"Copying metadata for a stationary dataset. \n"
"This means either a single image or a pump-probe experiment.\n"
"The 'scan_axis' will be a single scalar."
)
nxs_img = CopyTristanNexus.single_image_nexus(
data_file[0],
nexus_file,
params.input.write_mode,
)
elif params.input.experiment_type == "rotation":
logger.info(
"Copying metadata for a roation dataset. \n"
"This means either a multiple images or a multi sequences pump-probe experiment.\n"
)
if args.osc_angle:
logger.info(
f"Scan_axis will be a list of values defined by an oscillation angle of {args.osc_angle}."
)
elif args.num_bins:
logger.info(f"Scan_ axis will be a list of {args.num_bins} values.")
for filename in data_file:
nxs_img = CopyTristanNexus.multiple_images_nexus(
filename,
nexus_file,
params.input.write_mode,
args.osc_angle,
args.num_bins,
)
logger.info(
f"Experiment metadata correctly copied from {nexus_file} to {nxs_img}."
)
except Exception as err:
logger.info(f"File {nexus_file} could not be copied.")
logger.exception(err)
# Define subparsers
subparsers = parser.add_subparsers(
help="Choose copy methods.",
required=True,
dest="sub-command",
)
parser_general = subparsers.add_parser(
"gen",
aliases=["copy-file"],
description=("Copy experiment metadata to a new NeXus file."),
parents=[full_copy_parser],
)
parser_general.set_defaults(func=copy_nexus)
parser_tristan = subparsers.add_parser(
"tristan",
aliases=["copy-tristan"],
description=(
"Create a new NeXus file for binned images by copying the metadata from the original experiment NeXus file."
),
parents=[tristan_copy_parser],
)
parser_tristan.set_defaults(func=copy_tristan_nexus)
def main():
# Define a stream handler
CH = logging.StreamHandler(sys.stdout)
CH.setLevel(logging.DEBUG)
CH.setFormatter(formatter)
# Add handler to logger
logger.addHandler(CH)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
    main()
|
the-stack_0_11669 | #
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""test for str and repr
Make sure things can print and in a nice form. Put all the print tests together so that running this test file alone
can inspect all the print messages in the project
"""
import torch
from torch import nn
from pytorch_quantization import calib
from pytorch_quantization import tensor_quant
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
# pylint:disable=missing-docstring, no-self-use
class TestPrint():
def test_print_descriptor(self):
test_desc = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
print(test_desc)
def test_print_tensor_quantizer(self):
test_quantizer = TensorQuantizer()
print(test_quantizer)
def test_print_module(self):
class _TestModule(nn.Module):
def __init__(self):
super(_TestModule, self).__init__()
self.conv = nn.Conv2d(33, 65, 3)
self.quant_conv = quant_nn.Conv2d(33, 65, 3)
self.linear = nn.Linear(33, 65)
self.quant_linear = quant_nn.Linear(33, 65)
test_module = _TestModule()
print(test_module)
def test_print_calibrator(self):
print(calib.MaxCalibrator(7, 1, False))
hist_calibrator = calib.HistogramCalibrator(8, None, True)
hist_calibrator.collect(torch.rand(10))
print(hist_calibrator)
|
the-stack_0_11670 |
if __name__ != "__main__":
import csv
import time
import pandas as pd
class ObrasBot():
def __init__(self, browser, portal_url, categorias, veiculo, nome_csv, *colunas):
self.browser = browser
self.categorias = [c.upper() for c in categorias]
self.veiculo = [v.upper() for v in veiculo]
self.portal_url = portal_url
self.nome_csv = nome_csv
self.colunas = list(colunas)
            # open the portal website
self.browser.get(self.portal_url)
self.browser.maximize_window()
            # advanced-search button
self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[1]/tbody/tr[3]/td/a"
).click()
time.sleep(2)
def criaBase(self):
with open(self.nome_csv, 'w') as base:
writer = csv.DictWriter(base, self.colunas)
writer.writeheader()
def alimentaBase(self, obra, diretores, veiculo, distribuidor, classificacao):
with open(self.nome_csv, 'a') as base:
writer = csv.DictWriter(base, self.colunas)
writer.writerow({self.colunas[0]: obra,
self.colunas[1]: diretores,
self.colunas[2]: veiculo,
self.colunas[3]: distribuidor,
self.colunas[4]: classificacao})
def limpaBase(self):
base = pd.read_csv(self.nome_csv, encoding='ISO-8859-1') #ou latin 1
base.drop_duplicates(inplace=True)
base.to_csv(self.nome_csv, index=False, encoding='ISO-8859-1')
self.browser.quit()
def portalObras(self, obra):
obra = obra.upper().replace('"', '')
        # clear the search field
self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[1]/tbody/tr[1]/td[2]/input").clear()
        # type the title
titulo_br_input = self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[1]/tbody/tr[1]/td[2]/input")
for o in obra:
time.sleep(0.1)
titulo_br_input.send_keys(o)
        # click the search button
self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[2]/tbody/tr/td/a"
).click()
time.sleep(2)
linha = 1
while True:
try:
                # read the Brazilian title and the category from table 1
titulo_br_tabela = self.browser.find_element_by_xpath(
'//*[@id="lista"]/tbody/tr[' + str(linha) + ']/td[1]'
).text
categoria_tabela = self.browser.find_element_by_xpath(
'//*[@id="lista"]/tbody/tr[' + str(linha) + ']/td[4]'
).text
if titulo_br_tabela.strip().upper() == obra and categoria_tabela.strip().upper() in self.categorias:
                    # open the page of interest by clicking the button present in every row of table 1
self.browser.find_element_by_xpath('//*[@id="lista"]/tbody/tr[' + str(linha) + ']/td[5]/a').click()
time.sleep(2)
aux = 1
while True:
try:
                                # read table 2, filtering by vehicle and, again, by category
categoria_tabela2 = self.browser.find_element_by_xpath(
'//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[4]'
).text
veiculo_tabela2 = self.browser.find_element_by_xpath(
'//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[3]'
).text
if categoria_tabela2.strip().upper() in self.categorias and veiculo_tabela2.strip().upper() in self.veiculo:
diretores_tabela = self.browser.find_element_by_xpath(
'//*[@id="TRbl_report_TbObra"]/tbody/tr[6]/td'
).text
                                    # when there is more than one director
                                    # the field usually looks like: Jurandir Muller/Roberto Tibiriçá/Claudia Priscilla
diretores_tabela = diretores_tabela.replace('Diretores:', '').replace('/', ' - ').replace('"', '').strip().upper()
veiculo = self.browser.find_element_by_xpath('//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[3]').text
distribuidor = self.browser.find_element_by_xpath('//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[5]').text
distribuidor = distribuidor.replace('"', "")
classificacao = self.browser.find_element_by_xpath('//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[6]').text
self.alimentaBase(obra, diretores_tabela, veiculo, distribuidor, classificacao)
aux += 1
except:
break
                    # go back to the previous page
self.browser.find_element_by_xpath('/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/a').click()
linha += 1
except:
break
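    # Illustrative usage sketch only (the driver, portal URL, category/vehicle filters and column
    # names below are assumptions, not values taken from this scraper):
    #   from selenium import webdriver
    #   bot = ObrasBot(webdriver.Chrome(), "https://portal.example.gov.br/obras",
    #                  ["obra audiovisual"], ["salas de exibicao"], "obras.csv",
    #                  "OBRA", "DIRETORES", "VEICULO", "DISTRIBUIDOR", "CLASSIFICACAO")
    #   bot.criaBase()
    #   bot.portalObras("Titulo da obra")
    #   bot.limpaBase()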
|
the-stack_0_11671 | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
import pandas as pd
import time
st.title("Empty charts")
st.write(
"""
This file tests what happens when you pass an empty dataframe or `None` into
a chart.
In some cases, we handle it nicely. In others, we show an error. The reason
for the latter is because some chart types derive their configuration from
the dataframe you pass in at the start. So when there's no dataframe we
cannot detect that configuration.
"""
)
data = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, 3, 2, 4]})
spec = {
"mark": "line",
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
},
}
st.subheader("Here are 4 empty charts")
st.vega_lite_chart(spec)
st.line_chart()
st.area_chart()
st.bar_chart()
st.write("Below is an empty pyplot chart (i.e. just a blank image)")
st.pyplot()
st.write("...and that was it.")
st.subheader("Here are 2 filled charts")
x = st.vega_lite_chart(spec)
x.vega_lite_chart(data, spec)
x = st.vega_lite_chart(spec)
time.sleep(0.2) # Sleep a little so the add_rows gets sent separately.
x.add_rows(data)
x = st.line_chart()
x.add_rows(data)
x = st.area_chart()
x.add_rows(data)
x = st.bar_chart()
x.add_rows(data)
st.subheader("Here is 1 empty map")
st.deck_gl_chart()
# TODO: Implement add_rows on DeckGL
# st.subheader('1 filled map')
# x = st.deck_gl_chart()
# x.add_rows({'lat': 0, 'lon': 0})
# TODO: write Python tests for these:
# (This manual test doesn't work anymore since errors break execution now)
# st.subheader('Here are 10 errors')
# st.write(1)
# st.vega_lite_chart({})
# st.write(2)
# st.vega_lite_chart(data, {})
# st.write(3)
# st.vega_lite_chart(data)
# st.write(4)
# st.vega_lite_chart()
# st.write(5)
# st.altair_chart()
# st.write(6)
# st.line_chart()
# st.write(7)
# st.area_chart()
# st.write(8)
# st.bar_chart()
# st.write(9)
# st._native_chart()
# st.write(10)
# st.map()
|
the-stack_0_11672 |
from torchvision.transforms import ToPILImage
from datasets.data_utils import DatasetOutput, default_transform
from typing import Callable
from PIL import Image
from .data_utils import slide_windows_over_img, DatasetOutput
import torch
import torch.nn as nn
from torch.utils.data import Dataset
class GenericImageDataset(Dataset):
"""Generic dataset which defines all basic operations for the images."""
def __init__(
self,
path_to_images: str,
get_sub_images: bool = False,
sub_images_nr_windows: int = 10,
sub_images_batch_size: int = 10,
sub_images_min_size: int = 30,
sub_images_max_size: int = 64,
sub_images_stride: float = 0.2,
classification_label: int = 0,
transform: Callable = default_transform,
**kwargs
):
self.path_to_images = path_to_images
self.transform = transform
self.classification_label = classification_label
# Sub images properties
self.get_sub_images = get_sub_images
self.sub_images_min_size = sub_images_min_size
self.sub_images_max_size = sub_images_max_size
self.sub_images_nr_windows = sub_images_nr_windows
self.sub_images_batch_size = sub_images_batch_size
self.sub_images_stride = sub_images_stride
self.pil_transformer = ToPILImage()
# Create store for data
self.store = None
def __getitem__(self, idx: int):
# Read the image from the store index, and a dataset-defined `.read_image`
img: Image = self.read_image(idx)
# Apply transformation to the image
tensor_img: torch.Tensor = self.transform(img)
sub_images: torch.Tensor = torch.tensor(0)
# Extract sub images if applicable
if self.get_sub_images:
sub_images = slide_windows_over_img(
tensor_img,
min_win_size=self.sub_images_min_size,
max_win_size=self.sub_images_max_size,
nr_windows=self.sub_images_nr_windows,
stride=self.sub_images_stride
)
return DatasetOutput(
image=tensor_img,
label=torch.tensor(self.classification_label),
idx=torch.tensor(idx).long(),
sub_images=sub_images
)
def read_image(self, idx: int):
"""Interface, returns an PIL Image using the index."""
pass
def __len__(self):
return len(self.store)
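# Illustrative sketch only (not part of the original module): a minimal subclass that fills in
# `store` and `read_image`. The folder layout and the "*.jpg" pattern are assumptions.
#   from pathlib import Path
#   class FolderImageDataset(GenericImageDataset):
#       def __init__(self, path_to_images: str, **kwargs):
#           super().__init__(path_to_images, **kwargs)
#           self.store = sorted(Path(path_to_images).glob("*.jpg"))  # anything with len() and indexing works
#       def read_image(self, idx: int) -> Image:
#           return Image.open(self.store[idx])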
|
the-stack_0_11676 | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('img/entrada/folha-de-mamao-menor.jpg',0)
edges = cv.Canny(img,100,200)
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show() |
the-stack_0_11677 | # coding=utf-8
# Copyright 2021 TF-Transformers Authors and The TensorFlow Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tf_transformers.core import LegacyLayer
from tf_transformers.layers import dense_einsum
from tf_transformers.layers.attention import BartAttention
from tf_transformers.utils import tf_utils
class TransformerBART(LegacyLayer):
"""Transformer
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762).
"""
def __init__(
self,
hidden_size,
num_attention_heads,
intermediate_size,
intermediate_activation,
use_auto_regressive,
attention_head_size=None,
dropout_rate=0.0,
attention_dropout_rate=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
use_decoder=False,
share_attention_layers=True,
layer_norm_epsilon=None,
is_training=False,
use_dropout=False,
name="transformer",
**kwargs,
):
"""
Args:
num_attention_heads: int, Number of attention heads.
intermediate_size: int, Size of the intermediate layer.
intermediate_activation: keras object, Activation for the intermediate layer.
attention_cfg: The config with which to instantiate `attention_cls`. Ignored
if attention_cls is a layer instance.
dropout_rate: float (between 0 and 1), Dropout probability
for the post-attention and output dropout.
attention_dropout_rate: float (between 0 and 1), Dropout probability
for within the attention layer.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
            share_attention_layers: bool, Whether to share the same attention layers across decoder cross attentions.
            use_decoder: bool, Whether this layer is used as part of a decoder (adds cross attention).
"""
super(TransformerBART, self).__init__(name=name, is_training=is_training, use_dropout=use_dropout, **kwargs)
# mostly embedding_size is same as projecting after attention
self._hidden_size = hidden_size
self._num_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._attention_head_size = attention_head_size
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._use_decoder = use_decoder
self._layer_norm_epsilon = layer_norm_epsilon
self._is_training = is_training
self._use_dropout = use_dropout
self._use_auto_regressive = use_auto_regressive
def build(self, input_shape):
"""Build variables based on shape at run time.
Args:
input_shape ([input_word_embeddings 3D, attention_mask 3D]): input_word_embeddings
(b x s x h) and attention_mask (b x 1 x s)
Raises:
ValueError: [description]
ValueError: [description]
"""
input_tensor = input_shape[0]
input_tensor_shape = tf.TensorShape(input_tensor)
batch_size, sequence_length, embedding_size = input_tensor_shape
if not self._attention_head_size:
# If attention_head is None, then make sure
# it can be inferred from (embedding_size // self._num_heads)
if embedding_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (embedding_size, self._num_heads)
)
self._attention_head_size = int(embedding_size // self._num_heads)
# Common kwargs
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
)
# Self Attention Layer
self._attention_layer = BartAttention(
num_heads=self._num_heads,
head_size=self._attention_head_size,
dropout_rate=self._attention_dropout_rate,
name="self_attention",
is_training=self._is_training,
use_decoder=self._use_decoder,
use_auto_regressive=self._use_auto_regressive,
use_dropout=self._use_dropout,
**common_kwargs,
)
# Dense layer
self._attention_output_dense = dense_einsum.DenseEinsum(
output_shape=self._hidden_size, name="self_attention_output", **common_kwargs
)
# Attention Dropout
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Self Attention Norm
self._attention_layer_norm = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._layer_norm_epsilon,
dtype=tf.float32,
)
# Cross Attention for Decoder
if self._use_decoder:
# Cross Attention layer
self._cross_attention_layer = BartAttention(
num_heads=self._num_heads,
head_size=self._attention_head_size,
dropout_rate=self._attention_dropout_rate,
name="cross_attention",
is_training=self._is_training,
use_auto_regressive=self._use_auto_regressive,
use_decoder=self._use_decoder,
use_dropout=self._use_dropout,
**common_kwargs,
)
# Dense
self._cross_attention_output_dense = dense_einsum.DenseEinsum(
output_shape=self._hidden_size, name="cross_attention_output", **common_kwargs
)
# Norm
self._cross_attention_layer_norm = tf.keras.layers.LayerNormalization(
name="cross_attention_layer_norm",
axis=-1,
epsilon=self._layer_norm_epsilon,
dtype=tf.float32,
)
# Main Dense Layer after Attention, with activation
self._intermediate_dense = dense_einsum.DenseEinsum(
output_shape=self._intermediate_size,
activation=self._intermediate_activation,
# This layer is always float32 for numeric stability.
dtype=tf.float32,
name="intermediate",
**common_kwargs,
)
        # Output dense (projects back to hidden size)
self._output_dense = dense_einsum.DenseEinsum(output_shape=self._hidden_size, name="output", **common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=self._layer_norm_epsilon, dtype=tf.float32
)
super(TransformerBART, self).build(input_shape)
def get_config(self):
config = {
"hidden_size": self._hidden_size,
"num_attention_heads": self._num_heads,
"intermediate_size": self._intermediate_size,
"intermediate_activation": self._intermediate_activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer": tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint": tf.keras.constraints.serialize(self._bias_constraint),
"is_training": self.is_training,
"use_auto_regressive": self._use_auto_regressive,
}
base_config = super(TransformerBART, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call_encoder(self, inputs, cache_key=None, cache_value=None):
"""
Training pipeline
"""
# b x s x h # b x s x s
input_tensor, attention_mask = inputs
# [from_tensor, to_tensor]
attention_inputs = [input_tensor, input_tensor]
if attention_mask is not None:
attention_inputs.append(attention_mask)
# attention_inputs = [from_tensor, to_tensor, attention_mask] ((b x s x 768))
attention_output, key, value = self._attention_layer(
attention_inputs, cache_key=cache_key, cache_value=cache_value
)
attention_output = self._attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output, training=self._use_dropout)
attention_output = self._attention_layer_norm(input_tensor + attention_output)
        # Mixed-precision stability requires normalization to be computed in tf.float32.
attention_output = tf.cast(attention_output, dtype=tf_utils.get_dtype())
intermediate_output = self._intermediate_dense(attention_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
layer_output = self._output_layer_norm(layer_output + attention_output)
return layer_output, key, value
def call_decoder(self, inputs, cache_key=None, cache_value=None):
"""
Training pipeline
"""
input_tensor, attention_mask, encoder_output, decoder_encoder_mask = inputs
# Decoder Self Attention (Call goes to bart_attention.py call_training)
attention_inputs = [input_tensor, input_tensor]
if attention_mask is not None:
attention_inputs.append(attention_mask)
attention_output, key, value = self._attention_layer(
attention_inputs, cache_key=cache_key, cache_value=cache_value
)
# Self Attention Dense + Norm
attention_output = self._attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output, training=self.use_dropout)
attention_output = self._attention_layer_norm(attention_output + input_tensor)
if self._use_decoder:
# Cross Attention
attention_output_copy = tf.identity(attention_output, name="attention_output_copy")
attention_inputs_for_decoder = [
attention_output_copy,
encoder_output,
decoder_encoder_mask,
]
# For auto-regressive we need this
            # cache_key has to be zeros, because there is nothing
            # to cache in cross_attention
cache_key_cross = None
cache_value_cross = None
if cache_key is not None and self._use_auto_regressive:
cache_key_cross = tf.zeros_like(cache_key)
cache_value_cross = tf.zeros_like(cache_value)
attention_output, _, _ = self._cross_attention_layer(
attention_inputs_for_decoder, cache_key=cache_key_cross, cache_value=cache_value_cross
)
attention_output = self._cross_attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output, training=self.use_dropout)
attention_output_copy = tf.cast(attention_output_copy, dtype=tf_utils.get_dtype())
attention_output = self._cross_attention_layer_norm(attention_output_copy + attention_output)
attention_output = tf.cast(attention_output, dtype=tf_utils.get_dtype())
# Last Projection
intermediate_output = self._intermediate_dense(attention_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
layer_output = self._output_layer_norm(layer_output + attention_output)
layer_output = tf.cast(layer_output, dtype=tf_utils.get_dtype())
return layer_output, key, value
def call(self, inputs, mode="encoder", cache_key=None, cache_value=None):
"""Call
Args:
inputs ([embeddings 3D, attention_mask 3D]): List of [embeddings,
attention_mask]
            mode (str, optional): "encoder" or "decoder" hint; the branch actually
                taken is determined by use_decoder. Defaults to "encoder".
            cache_key (tf.Tensor, optional): Cached attention keys for auto-regressive
                decoding. Defaults to None.
            cache_value (tf.Tensor, optional): Cached attention values for auto-regressive
                decoding. Defaults to None.
        Returns:
            (layer_output, key, value) from the encoder or decoder pass.
"""
if self._use_decoder:
outputs = self.call_decoder(inputs, cache_key=cache_key, cache_value=cache_value)
else:
outputs = self.call_encoder(inputs, cache_key=cache_key, cache_value=cache_value)
return outputs
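# A minimal usage sketch (hypothetical shapes and hyperparameters, not from the original source):
# layer = TransformerBART(hidden_size=768, num_attention_heads=12, intermediate_size=3072,
#                         intermediate_activation="gelu", use_auto_regressive=False)
# embeddings = tf.random.uniform((2, 16, 768))   # (batch, seq, hidden)
# attention_mask = tf.ones((2, 16, 16))          # (batch, seq, seq)
# layer_output, key, value = layer([embeddings, attention_mask], mode="encoder")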
|
the-stack_0_11678 | #!/usr/bin/python
"""
Convert polarised CST element files to OSKAR scalar element pattern format.
"""
from __future__ import print_function
import sys
import numpy
def load_cst_file(filename):
""""
Loads a CST element pattern file into a numpy matrix.
Parameters
----------
filename : string
Path of the CST element pattern file to load.
Returns
-------
Matrix of values from the CST file.
"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
X = []
for line in lines:
values = line.split()
        if len(values) != 8:
            continue
        x_all = numpy.array(values, dtype=numpy.dtype('f8'))
        X.append(x_all)
return numpy.array(X, dtype=numpy.dtype('f8'))
def convert(cst_file_in, scalar_file_out):
"""
Calculates a scalar element pattern file from a CST element pattern file
Parameters
----------
cst_file_in : string
Input CST format element pattern file
scalar_file_out : string
Output scalar format element pattern file
Notes
-----
This function is designed to be used to create scalar element input files
for the oskar_fit_element_data application.
"""
# Load the CST element pattern data.
X = load_cst_file(cst_file_in)
# Only require columns for:
# Theta, Phi, Abs(Theta), Phase(Theta), Abs(Phi), Phase(Phi)
X = numpy.copy(X[:, [0, 1, 3, 4, 5, 6]])
# Discard any data at values of phi >= 360 degrees,
# as any duplicated entries will cause this method to fail.
X = X[X[:, 1] < 360.0, :]
# Generate the rotated data for Y from X by adding 90 degrees to the phi
# values
Y = numpy.copy(X)
Y[:, 1] += 90.0
Y[Y[:, 1] >= 360.0, 1] -= 360.0
# Linked column sort by phi and then theta for both X and Y.
X = X[numpy.lexsort((X[:, 0], X[:, 1])), :]
Y = Y[numpy.lexsort((Y[:, 0], Y[:, 1])), :]
# Check that the coordinate columns in X and Y now match.
assert numpy.sum(numpy.abs(X[:, 0] - Y[:, 0])) < 1e-6
assert numpy.sum(numpy.abs(X[:, 1] - Y[:, 1])) < 1e-6
# Generate scalar values from sorted data.
X_theta = X[:, 2] * numpy.exp(1j * numpy.radians(X[:, 3]))
X_phi = X[:, 4] * numpy.exp(1j * numpy.radians(X[:, 5]))
Y_theta = Y[:, 2] * numpy.exp(1j * numpy.radians(Y[:, 3]))
Y_phi = Y[:, 4] * numpy.exp(1j * numpy.radians(Y[:, 5]))
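    # The scalar response is the mean total power of the X and Y ports:
    # |E_theta|^2 + |E_phi|^2 summed over both polarisations, halved (0.5 factor) below.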
s = X_theta * numpy.conj(X_theta) + X_phi * numpy.conj(X_phi) + \
Y_theta * numpy.conj(Y_theta) + Y_phi * numpy.conj(Y_phi)
# Take the sqrt to convert to a 'voltage'
s = numpy.sqrt(0.5 * s)
s_amp = numpy.absolute(s)
s_phase = numpy.angle(s, deg=True)
# Write scalar values to file Columns = (theta, phi, amp, phase).
o = numpy.column_stack((X[:, 0], X[:, 1], s_amp, s_phase))
numpy.savetxt(scalar_file_out, o,
fmt=['%12.4f', '%12.4f', '%20.6e', '%12.4f'])
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: oskar_convert_cst_to_scalar.py "
"<input CST file> <output scalar file>")
sys.exit(1)
convert(sys.argv[1], sys.argv[2])
|
the-stack_0_11679 | import asyncio
from datetime import datetime
import io
import os
from pathlib import Path
from telethon import events, functions, types
from telethon.tl.types import InputMessagesFilterDocument
from . import *
@bot.on(phoenix_cmd(pattern=r"cmds"))
@bot.on(sudo_cmd(pattern=r"cmds", allow_sudo=True))
async def kk(event):
if event.fwd_from:
return
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
cmd = "ls phoenix/plugins"
thumb = phoenix_logo
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
o = stdout.decode()
_o = o.split("\n")
o = "\n".join(_o)
OUTPUT = f"List of Plugins in bot :- \n\n{o}\n\n<><><><><><><><><><><><><><><><><><><><><><><><>\nHELP:- If you want to know the commands for a plugin, do :- \n.plinfo <plugin name> without the < > brackets. \nJoin {hell_grp} for help."
if len(OUTPUT) > 69:
with io.BytesIO(str.encode(OUTPUT)) as out_file:
out_file.name = "cmd_list.text"
phoenix_file = await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
thumb=thumb,
reply_to=reply_to_id,
)
await edit_or_reply(phoenix_file, f"Output Too Large. This is the file for the list of plugins in bot.\n\n**BY :-** {PHOENIX_USER}")
await event.delete()
@bot.on(phoenix_cmd(pattern=r"send (?P<shortname>\w+)", outgoing=True))
@bot.on(sudo_cmd(pattern=r"send (?P<shortname>\w+)", allow_sudo=True))
async def send(event):
if event.fwd_from:
return
message_id = event.message.id
thumb = phoenix_logo
input_str = event.pattern_match.group(1)
omk = f"**• Plugin name ≈** `{input_str}`\n**• Uploaded by ≈** {phoenix_mention}\n\n⚡ **[ρнσєηιχ ]({chnl_link})** ⚡"
the_plugin_file = "./hellbot/plugins/{}.py".format(input_str)
if os.path.exists(the_plugin_file):
lauda = await event.client.send_file(
event.chat_id,
the_plugin_file,
thumb=thumb,
caption=omk,
force_document=True,
allow_cache=False,
reply_to=message_id,
)
await event.delete()
else:
await eod(event, "File not found..... Kk vaii !!!")
@bot.on(phoenix_cmd(pattern="install$", outgoing=True))
@bot.on(sudo_cmd(pattern="install$", allow_sudo=True))
async def install(event):
if event.fwd_from:
return
a = "__Installing.__"
b = 1
await event.edit(a)
if event.fwd_from:
return
if event.reply_to_msg_id:
try:
downloaded_file_name = await event.client.download_media( # pylint:disable=E0602
await event.get_reply_message(),
"./phoenix/plugins/" # pylint:disable=E0602
)
if "(" not in downloaded_file_name:
path1 = Path(downloaded_file_name)
shortname = path1.stem
load_module(shortname.replace(".py", ""))
if shortname in CMD_LIST:
string = "**Commands found in** `{}`\n".format((os.path.basename(downloaded_file_name)))
for i in CMD_LIST[shortname]:
string += " • `" + i
string += "`\n"
if b == 1:
a = "__Installing..__"
b = 2
else:
a = "__Installing...__"
b = 1
await eor(event, a)
return await eor(event, f"✅ **Installed module** :- `{shortname}` \n✨ BY :- {phoenix_mention}\n\n{string}\n\n ⚡ **[ʟɛɢɛռɖaʀʏ ᴀғ ρнσєηιχ]({chnl_link})** ⚡", link_preview=False)
return await eor(event, f"Installed module `{os.path.basename(downloaded_file_name)}`")
else:
os.remove(downloaded_file_name)
return await eod(event, f"**Failed to Install** \n`Error`\nModule already installed or unknown format")
except Exception as e:
await eod(event, f"**Failed to Install** \n`Error`\n{str(e)}")
return os.remove(downloaded_file_name)
@bot.on(phoenix_cmd(pattern=r"uninstall (?P<shortname>\w+)", outgoing=True))
@bot.on(sudo_cmd(pattern=r"uninstall (?P<shortname>\w+)", allow_sudo=True))
async def uninstall(kraken):
if kraken.fwd_from:
return
shortname = kraken.pattern_match["shortname"]
dir_path =f"./phoenix/plugins/{shortname}.py"
try:
remove_plugin(shortname)
os.remove(dir_path)
await eod(kraken, f"Uninstalled `{shortname}` successfully")
except OSError as e:
await kraken.edit("Error: %s : %s" % (dir_path, e.strerror))
@bot.on(phoenix_cmd(pattern=r"unload (?P<shortname>\w+)$"))
@bot.on(sudo_cmd(pattern=r"unload (?P<shortname>\w+)$", allow_sudo=True))
async def unload(event):
if event.fwd_from:
return
shortname = event.pattern_match["shortname"]
try:
remove_plugin(shortname)
await event.edit(f"Successfully unloaded `{shortname}`")
    except Exception as e:
        await event.edit(
            "Failed to unload {}\n{}".format(shortname, str(e))
        )
@bot.on(phoenix_cmd(pattern=r"load (?P<shortname>\w+)$"))
@bot.on(sudo_cmd(pattern=r"load (?P<shortname>\w+)$", allow_sudo=True))
async def load(event):
if event.fwd_from:
return
shortname = event.pattern_match["shortname"]
try:
try:
remove_plugin(shortname)
except BaseException:
pass
load_module(shortname)
await event.edit(f"Successfully loaded `{shortname}`")
except Exception as e:
await event.edit(
f"Sorry, could not load {shortname} because of the following error.\n{str(e)}"
)
CmdHelp("core").add_command(
"install", "<reply to a .py file>", "Installs the replied python file if suitable to Hêllẞø†'s codes."
).add_command(
"uninstall", "<plugin name>", "Uninstalls the given plugin from ρнσєηιχ. To get that again do .restart", "uninstall alive"
).add_command(
"load", "<plugin name>", "Loades the unloaded plugin to your userbot", "load alive"
).add_command(
"unload", "<plugin name>", "Unloads the plugin from your userbot", "unload alive"
).add_command(
"send", "<file name>", "Sends the given file from your userbot server, if any.", "send alive"
).add_command(
"cmds", None, "Gives out the list of modules in HellBot."
).add_warning(
"❌ Install External Plugin On Your Own Risk. We won't help if anything goes wrong after installing a plugin."
).add()
# hellbot
|
the-stack_0_11682 | #!/usr/bin/python
#
# sslsniff Captures data on read/recv or write/send functions of OpenSSL and
# GnuTLS
# For Linux, uses BCC, eBPF.
#
# USAGE: sslsniff.py [-h] [-p PID] [-c COMM] [-o] [-g] [-d]
#
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 12-Aug-2016 Adrian Lopez Created this.
# 13-Aug-2016 Mark Drayton Fix SSL_Read
# 17-Aug-2016 Adrian Lopez Capture GnuTLS and add options
#
from __future__ import print_function
import ctypes as ct
from bcc import BPF
import argparse
# arguments
examples = """examples:
./sslsniff # sniff OpenSSL and GnuTLS functions
./sslsniff -p 181 # sniff PID 181 only
./sslsniff -c curl # sniff curl command only
./sslsniff --no-openssl # don't show OpenSSL calls
./sslsniff --no-gnutls # don't show GnuTLS calls
"""
parser = argparse.ArgumentParser(
description="Sniff SSL data",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--pid", type=int, help="sniff this PID only.")
parser.add_argument("-c", "--comm",
help="sniff only commands matching string.")
parser.add_argument("-o", "--no-openssl", action="store_false", dest="openssl",
help="do not show OpenSSL calls.")
parser.add_argument("-g", "--no-gnutls", action="store_false", dest="gnutls",
help="do not show GnuTLS calls.")
parser.add_argument('-d', '--debug', dest='debug', action='count', default=0,
help='debug mode.')
args = parser.parse_args()
prog = """
#include <linux/ptrace.h>
#include <linux/sched.h> /* For TASK_COMM_LEN */
struct probe_SSL_data_t {
u64 timestamp_ns;
u32 pid;
char comm[TASK_COMM_LEN];
char v0[464];
u32 len;
};
BPF_PERF_OUTPUT(perf_SSL_write);
int probe_SSL_write(struct pt_regs *ctx, void *ssl, void *buf, int num) {
u32 pid = bpf_get_current_pid_tgid();
FILTER
struct probe_SSL_data_t __data = {0};
__data.timestamp_ns = bpf_ktime_get_ns();
__data.pid = pid;
__data.len = num;
bpf_get_current_comm(&__data.comm, sizeof(__data.comm));
if ( buf != 0) {
bpf_probe_read(&__data.v0, sizeof(__data.v0), buf);
}
perf_SSL_write.perf_submit(ctx, &__data, sizeof(__data));
return 0;
}
BPF_PERF_OUTPUT(perf_SSL_read);
BPF_HASH(bufs, u32, u64);
int probe_SSL_read_enter(struct pt_regs *ctx, void *ssl, void *buf, int num) {
u32 pid = bpf_get_current_pid_tgid();
FILTER
bufs.update(&pid, (u64*)&buf);
return 0;
}
int probe_SSL_read_exit(struct pt_regs *ctx, void *ssl, void *buf, int num) {
u32 pid = bpf_get_current_pid_tgid();
FILTER
u64 *bufp = bufs.lookup(&pid);
if (bufp == 0) {
return 0;
}
struct probe_SSL_data_t __data = {0};
__data.timestamp_ns = bpf_ktime_get_ns();
__data.pid = pid;
__data.len = PT_REGS_RC(ctx);
bpf_get_current_comm(&__data.comm, sizeof(__data.comm));
if (bufp != 0) {
bpf_probe_read(&__data.v0, sizeof(__data.v0), (char *)*bufp);
}
bufs.delete(&pid);
perf_SSL_read.perf_submit(ctx, &__data, sizeof(__data));
return 0;
}
"""
if args.pid:
prog = prog.replace('FILTER', 'if (pid != %d) { return 0; }' % args.pid)
else:
prog = prog.replace('FILTER', '')
if args.debug:
print(prog)
b = BPF(text=prog)
# It looks like SSL_read's arguments aren't available in a return probe so you
# need to stash the buffer address in a map on the function entry and read it
# on its exit (Mark Drayton)
#
if args.openssl:
b.attach_uprobe(name="ssl", sym="SSL_write", fn_name="probe_SSL_write",
pid=args.pid or -1)
b.attach_uprobe(name="ssl", sym="SSL_read", fn_name="probe_SSL_read_enter",
pid=args.pid or -1)
b.attach_uretprobe(name="ssl", sym="SSL_read",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
if args.gnutls:
b.attach_uprobe(name="gnutls", sym="gnutls_record_send",
fn_name="probe_SSL_write", pid=args.pid or -1)
b.attach_uprobe(name="gnutls", sym="gnutls_record_recv",
fn_name="probe_SSL_read_enter", pid=args.pid or -1)
b.attach_uretprobe(name="gnutls", sym="gnutls_record_recv",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
# define output data structure in Python
TASK_COMM_LEN = 16 # linux/sched.h
MAX_BUF_SIZE = 464 # Limited by the BPF stack
# Max size of the whole struct: 512 bytes
class Data(ct.Structure):
_fields_ = [
("timestamp_ns", ct.c_ulonglong),
("pid", ct.c_uint),
("comm", ct.c_char * TASK_COMM_LEN),
("v0", ct.c_char * MAX_BUF_SIZE),
("len", ct.c_uint)
]
# header
print("%-12s %-18s %-16s %-6s %-6s" % ("FUNC", "TIME(s)", "COMM", "PID",
"LEN"))
# process event
start = 0
def print_event_write(cpu, data, size):
print_event(cpu, data, size, "WRITE/SEND")
def print_event_read(cpu, data, size):
print_event(cpu, data, size, "READ/RECV")
def print_event(cpu, data, size, rw):
global start
event = ct.cast(data, ct.POINTER(Data)).contents
# Filter events by command
    if args.comm:
        if args.comm != event.comm.decode():
            return
if start == 0:
start = event.timestamp_ns
time_s = (float(event.timestamp_ns - start)) / 1000000000
s_mark = "-" * 5 + " DATA " + "-" * 5
e_mark = "-" * 5 + " END DATA " + "-" * 5
truncated_bytes = event.len - MAX_BUF_SIZE
if truncated_bytes > 0:
e_mark = "-" * 5 + " END DATA (TRUNCATED, " + str(truncated_bytes) + \
" bytes lost) " + "-" * 5
print("%-12s %-18.9f %-16s %-6d %-6d\n%s\n%s\n%s\n\n" % (rw, time_s,
event.comm.decode(),
event.pid,
event.len,
s_mark,
event.v0.decode(),
e_mark))
b["perf_SSL_write"].open_perf_buffer(print_event_write)
b["perf_SSL_read"].open_perf_buffer(print_event_read)
while 1:
b.kprobe_poll()
|
the-stack_0_11683 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
import time
import logging
from . import builder_config
from .utils import save_profiled_results, merge_info
from nn_meter.builder.backends import connect_backend
logging = logging.getLogger("nn-Meter")
def convert_models(backend, models, mode = 'predbuild', broken_point_mode = False):
""" convert the model to the needed format by backend, in order to increase efficiency when profiling on device.
@params:
backend (subclass instance of BaseBackend): applied backend instance
models (str or dict): the Dict of models or the path of the json file about models information
mode (str): the mode for running models, including ['ruletest', 'predbuild']
    broken_point_mode (boolean): if True, skip all models that already have the attribute "converted_model"
"""
if isinstance(models, str):
save_name = os.path.basename(models)
with open(models, 'r') as fp:
models = json.load(fp)
else:
save_name = "converted_results.json"
workspace_path = builder_config.get('WORKSPACE', mode)
model_save_path = os.path.join(workspace_path, 'models')
os.makedirs(model_save_path, exist_ok=True)
info_save_path = os.path.join(workspace_path, "results")
os.makedirs(info_save_path, exist_ok=True)
# convert models
count = 0
for module in models.values():
for id, model in module.items():
if broken_point_mode and 'converted_model' in model:
continue
try:
model_path = model['model']
converted_model = backend.convert_model(model_path, model_save_path, model['shapes'])
model['converted_model'] = converted_model
except Exception as e:
open(os.path.join(info_save_path, "convert_error.log"), 'a').write(f"{id}: {e}\n")
# save information to json file for per 50 models
count += 1
if count % 50 == 0:
with open(os.path.join(info_save_path, save_name), 'w') as fp:
json.dump(models, fp, indent=4)
logging.keyinfo(f"{count} models complete. Still converting... Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
with open(os.path.join(info_save_path, save_name), 'w') as fp:
json.dump(models, fp, indent=4)
logging.keyinfo(f"Complete convert all {count} models. Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
# save information to json file
with open(os.path.join(info_save_path, save_name), 'w') as fp:
json.dump(models, fp, indent=4)
logging.keyinfo(f"Save the converted models information to {os.path.join(info_save_path, save_name)}")
return models
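# A minimal usage sketch (hypothetical backend name and file path, adjust to your setup):
# backend = connect_backend(backend_name="tflite_cpu")
# models = convert_models(backend, "<workspace>/predbuild/results/models_info.json", mode="predbuild")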
def profile_models(backend, models, mode = 'ruletest', metrics = ["latency"], save_name = None,
have_converted = False, log_frequency = 50, broken_point_mode = False, **kwargs):
""" run models with given backend and return latency of testcase models
@params:
backend (subclass instance of BaseBackend): applied backend instance
models (str or dict): the Dict of models or the path of the json file about models information
mode (str): the mode for running models, including ['ruletest', 'predbuild']
metrics (list): required metrics to report. We only support latency for metric by now.
save_name (str): the save name to store profiled results. The whole path should be `<workspace>/<mode-folder>/results/<save-name>`
have_converted (boolean): if the model have been converted to the needed format by backend, the model will not be converted
before profiling. The model path of `model['converted_model']` will be profiled on device directly. The conversion of
model could be done by appling `nn_meter.builder.convert_models`
    broken_point_mode (boolean): if True, check the file `<workspace>/<mode-folder>/results/<save-name>` (if it exists)
        and skip all models that already have the attribute "latency"
**kwargs: arguments for profiler, such as `taskset` and `close_xnnpack` in TFLite profiler
"""
if isinstance(models, str):
with open(models, 'r') as fp:
models = json.load(fp)
workspace_path = builder_config.get('WORKSPACE', mode)
model_save_path = os.path.join(workspace_path, 'models')
os.makedirs(model_save_path, exist_ok=True)
info_save_path = os.path.join(workspace_path, "results")
os.makedirs(info_save_path, exist_ok=True)
# in broken point model, if the output file `<workspace>/<mode-folder>/results/<save-name>` exists,
# load the existing latency and skip these model in profiling
if broken_point_mode and os.path.isfile(os.path.join(info_save_path, save_name)):
from nn_meter.builder.backend_meta.utils import read_profiled_results
with open(os.path.join(info_save_path, save_name), 'r') as fp:
profiled_models = read_profiled_results(json.load(fp))
for module_key, module in models.items():
if module_key not in profiled_models:
continue
for id, model in module.items():
if id in profiled_models[module_key]:
model.update(profiled_models[module_key][id])
# profile models and get metric results
count = 0
detail = builder_config.get('DETAIL', mode)
save_name = save_name or "profiled_results.json"
logging.info("Profiling ...")
for module in models.values():
for id, model in module.items():
if broken_point_mode and 'latency' in model and model['latency'].avg != 0:
continue
if have_converted: # the models have been converted for the backend
try:
model_path = model['converted_model']
profiled_res = backend.profile(model_path, metrics, model['shapes'], **kwargs)
for metric in metrics:
model[metric] = profiled_res[metric]
time.sleep(0.2)
count += 1
except Exception as e:
open(os.path.join(info_save_path, "profile_error.log"), 'a').write(f"{id}: {e}\n")
else: # the models have not been converted
try:
model_path = model['model']
profiled_res = backend.profile_model_file(model_path, model_save_path, model['shapes'], metrics, **kwargs)
for metric in metrics:
model[metric] = profiled_res[metric]
time.sleep(0.2)
count += 1
except Exception as e:
open(os.path.join(info_save_path, "profile_error.log"), 'a').write(f"{id}: {e}\n")
# save information to json file for per 50 models
if count > 0 and count % log_frequency == 0:
save_profiled_results(models, os.path.join(info_save_path, save_name), detail, metrics)
logging.keyinfo(f"{count} models complete. Still profiling... Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
# save information to json file
save_profiled_results(models, os.path.join(info_save_path, save_name), detail, metrics)
logging.keyinfo(f"All {count} models profiling complete. Save all success profiled results to {os.path.join(info_save_path, save_name)}.")
return models
def sample_and_profile_kernel_data(kernel_type, sample_num, backend, sampling_mode = 'prior', configs = None, mark = '', detail = True,
metrics = ["latency"], **kwargs):
''' sample kernel configs and profile kernel model based on configs
'''
from nn_meter.builder.kernel_predictor_builder import generate_config_sample
# sample configs for kernel and generate models
models = generate_config_sample(kernel_type, sample_num, mark=mark,
sampling_mode=sampling_mode, configs=configs)
# connect to backend, run models and get latency
backend = connect_backend(backend_name=backend)
profiled_results = profile_models(backend, models, mode='predbuild', metrics=metrics, save_name=f"profiled_{kernel_type}.json")
return profiled_results
def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10,
iteration = 5, error_threshold = 0.1, predict_label = "latency", mark = ""):
"""
Build latency predictor for given kernel. This method contains three main steps:
1. sample kernel configs and profile kernel model based on configs;
2. initialize latency predictor of kernel based on the profiled data;
3. adopt adaptive sampler with iteratively doing step 1 for finegrained sampling to improve predictor performance
@params
kernel_type (str): the type of kernel
backend (str): the name of backend instance to profile models
init_sample_num (int, optional): the data size for predictor initialization. Defaults to 1000.
finegrained_sample_num (int, optional): the data size for adaptive sampling. For each data with error higher than
            error_threshold, #finegrained_sample_num data points will be generated based on the large-error data. Defaults to 10.
iteration (int, optional): the iteration for sampling and training. Initial sampling is regarded as iteration 1,
thus `iteration == 2` means one iteration for adaptive sampling. Defaults to 5.
        error_threshold (float, optional): the threshold of large error. Defaults to 0.1.
predict_label (str): the predicting label to build kernel predictor. Defaults to "latency"
"""
from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data
workspace_path = builder_config.get('WORKSPACE', 'predbuild')
mark = mark if mark == "" else "_" + mark
# init predictor builder with prior data sampler
kernel_data = sample_and_profile_kernel_data(kernel_type, init_sample_num, backend, sampling_mode='prior', mark=f'prior{mark}')
# use current sampled data to build regression model, and locate data with large errors in testset
predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark=f'prior{mark}',
save_path=os.path.join(workspace_path, "results"), predict_label=predict_label)
logging.keyinfo(f'Iteration 0: acc10 {acc10}, error_configs number: {len(error_configs)}')
for i in range(1, iteration):
# finegrained sampling and profiling for large error data
new_kernel_data = sample_and_profile_kernel_data(kernel_type, finegrained_sample_num, backend,
sampling_mode='finegrained', configs=error_configs, mark=f'finegrained{i}{mark}')
# merge finegrained data with previous data and build new regression model
kernel_data = merge_info(new_info=new_kernel_data, prev_info=kernel_data)
predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark=f'finegrained{i}{mark}',
save_path=os.path.join(workspace_path, "results"), predict_label=predict_label)
logging.keyinfo(f'Iteration {i}: acc10 {acc10}, error_configs number: {len(error_configs)}')
return predictor, kernel_data
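# A minimal usage sketch (hypothetical kernel type and backend name, adjust to your workspace):
# predictor, kernel_data = build_predictor_for_kernel(
#     "conv-bn-relu", "tflite_cpu",
#     init_sample_num=1000, finegrained_sample_num=10,
#     iteration=5, error_threshold=0.1)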
def build_initial_predictor_by_data(kernel_type, backend = None, init_sample_num = 20, error_threshold = 0.1, mark = '', predict_label = "latency"):
return build_predictor_for_kernel(kernel_type, backend, init_sample_num=init_sample_num, iteration=1, error_threshold=error_threshold, predict_label=predict_label, mark=f'{mark}')
def build_adaptive_predictor_by_data(kernel_type, kernel_data, backend = None, finegrained_sample_num = 20, error_threshold = 0.1, mark = '', predict_label = "latency"):
""" Run adaptive sampler in one iteration based
"""
workspace_path = builder_config.get('WORKSPACE', 'predbuild')
save_path = os.path.join(workspace_path, "results")
from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data, collect_kernel_data
_, _, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend = backend, error_threshold=error_threshold, save_path=None, predict_label=predict_label)
new_kernel_data = sample_and_profile_kernel_data(kernel_type, finegrained_sample_num, backend,
sampling_mode='finegrained', configs=error_configs, mark=mark)
# merge finegrained data with previous data and build new regression model
mark = mark if mark == "" else "_" + mark
kernel_data = merge_info(new_info=new_kernel_data, prev_info=collect_kernel_data(kernel_data))
predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold,
mark=f'finegrained{mark}', save_path=save_path, predict_label=predict_label)
logging.keyinfo(f'{mark}: acc10 {acc10}, error_configs number: {len(error_configs)}')
return predictor, kernel_data
def build_latency_predictor(backend):
"""
    Build latency predictors for all kernels in `<workspace-path>/configs/predictorbuild_config.yaml`
@params
backend (str): the name of backend instance to profile models
"""
kernels = builder_config.get("KERNELS", 'predbuild')
for kernel_type in kernels:
init_sample_num = kernels[kernel_type]["INIT_SAMPLE_NUM"]
finegrained_sample_num = kernels[kernel_type]["FINEGRAINED_SAMPLE_NUM"]
iteration = kernels[kernel_type]["ITERATION"]
error_threshold = kernels[kernel_type]["ERROR_THRESHOLD"]
build_predictor_for_kernel(
kernel_type, backend,
init_sample_num = init_sample_num,
finegrained_sample_num = finegrained_sample_num,
iteration = iteration,
error_threshold = error_threshold
)
|
the-stack_0_11685 | import numpy as np
class KNN:
"""
    K-nearest-neighbor classifier using L1 loss
"""
def __init__(self, k=1):
self.k = k
def fit(self, X, y):
self.train_X = X
self.train_y = y
def predict(self, X, num_loops=0):
'''
Uses the KNN model to predict clases for the data samples provided
Arguments:
X, np array (num_samples, num_features) - samples to run
through the model
num_loops, int - which implementation to use
Returns:
predictions, np array of ints (num_samples) - predicted class
for each sample
'''
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
else:
dists = self.compute_distances_two_loops(X)
        if self.train_y.dtype == np.bool_:
return self.predict_labels_binary(dists)
else:
return self.predict_labels_multiclass(dists)
def compute_distances_two_loops(self, X):
'''
Computes L1 distance from every sample of X to every training sample
Uses simplest implementation with 2 Python loops
Arguments:
X, np array (num_test_samples, num_features) - samples to run
Returns:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
'''
num_train = self.train_X.shape[0]
num_test = X.shape[0]
dists = np.zeros((num_test, num_train), np.float32)
for i_test in range(num_test):
for i_train in range(num_train):
dists[i_test][i_train] = np.sum(np.abs(X[i_test] - self.train_X[i_train]))
return dists
def compute_distances_one_loop(self, X):
'''
Computes L1 distance from every sample of X to every training sample
Vectorizes some of the calculations, so only 1 loop is used
Arguments:
X, np array (num_test_samples, num_features) - samples to run
Returns:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
'''
num_train = self.train_X.shape[0]
num_test = X.shape[0]
dists = np.zeros((num_test, num_train), np.float32)
for i_test in range(num_test):
dists[i_test] = np.sum(np.abs(X[i_test] - self.train_X), axis=1)
return dists
def compute_distances_no_loops(self, X):
'''
Computes L1 distance from every sample of X to every training sample
Fully vectorizes the calculations using numpy
Arguments:
X, np array (num_test_samples, num_features) - samples to run
Returns:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
'''
num_train = self.train_X.shape[0]
num_test = X.shape[0]
# Using float32 to to save memory - the default is float64
dists = np.zeros((num_test, num_train), np.float32)
dists = np.abs(X[:, None] - self.train_X).sum(-1)
return dists
def predict_labels_binary(self, dists):
'''
Returns model predictions for binary classification case
Arguments:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
Returns:
pred, np array of bool (num_test_samples) - binary predictions
for every test sample
'''
num_test = dists.shape[0]
        pred = np.zeros(num_test, bool)
for i in range(num_test):
pred[i] = self.train_y[np.argsort(dists[i])[:self.k]].sum() > self.k / 2
return pred
def predict_labels_multiclass(self, dists):
'''
Returns model predictions for multi-class classification case
Arguments:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
Returns:
pred, np array of int (num_test_samples) - predicted class index
for every test sample
'''
        num_test = dists.shape[0]
        pred = np.zeros(num_test, int)
        for i in range(num_test):
            # Choose the most frequent class among the k nearest training samples
            nearest_labels = self.train_y[np.argsort(dists[i])[:self.k]]
            pred[i] = np.bincount(nearest_labels).argmax()
        return pred
|
the-stack_0_11687 |
"""Class :py:class:`CMDBBUtils` utilities for calib manager DB methods
==============================================================================
Usage ::
# Test: python lcls2/psana/psana/graphqt/CMDBUtils.py
# Import
from psana.graphqt.CMDBUtils import dbu
# See test at the EOF
See:
- :class:`CMWMain`
- :class:`CMWConfig`
- `on github <https://github.com/slac-lcls/lcls2>`_.
Created on 2018-04-10 by Mikhail Dubrovin
"""
import logging
logger = logging.getLogger(__name__)
#_name = 'DCMDBUtils'
#from psana.pyalgos.generic.Logger import logger
import psana.pscalib.calib.MDBUtils as dbu
ObjectId = dbu.ObjectId
connect_to_server = dbu.connect_to_server
#database_names = dbu.database_names
database = dbu.database
#collection_names = dbu.collection_names
collection = dbu.collection
timestamp_id = dbu.timestamp_id
doc_add_id_ts = dbu.doc_add_id_ts
db_prefixed_name = dbu.db_prefixed_name
time_and_timestamp= dbu.time_and_timestamp
exportdb = dbu.exportdb
importdb = dbu.importdb
out_fname_prefix = dbu.out_fname_prefix
save_doc_and_data_in_file = dbu.save_doc_and_data_in_file
#insert_data_and_doc = dbu.insert_data_and_doc
#document_info = dbu.document_info
#db_prefixed_name = dbu.db_prefixed_name # ('')
#delete_databases = dbu.delete_databases # (list_db_names)
#delete_collections= dbu.delete_collections # (dic_db_cols)
#collection_info = dbu.collection_info # (client, dbname, colname)
from psana.graphqt.CMConfigParameters import cp
def connect_client(host=None, port=None, user=cp.user, upwd=cp.upwd): # user=dbu.cc.USERNAME
_host = cp.cdb_host.value() if host is None else host
_port = cp.cdb_port.value() if port is None else port
#logger.debug('CMDBBUtils: Connect client to host: %s port: %d user: %s upwd: %s' % (_host, _port, user, upwd))
return dbu.connect_to_server(_host, _port, user, upwd)
# if cp.upwd else dbu.connect_to_server(_host, _port, cp.user)
def database_names(client=None):
"""
"""
if client is None:
client = connect_client()
return dbu.database_names(client)
def collection_names(db):
"""
"""
if isinstance(db, str):
client = connect_client()
db = dbu.database(client, db)
return dbu.collection_names(db)
def delete_databases(list_db_names):
"""Delete databases specified in the list_db_names
"""
client = connect_client()
logger.debug('Delete databases:\n %s' % ('\n '.join(list_db_names)))
dbu.delete_databases(client, list_db_names)
def delete_collections(dic_db_cols):
"""Delete collections specified in the dic_db_cols consisting of pairs {dbname:lstcols}
"""
msg = 'Delete collections:'
client = connect_client()
for dbname, lstcols in dic_db_cols.items():
db = dbu.database(client, dbname)
msg += '\nFrom database: %s delete collections:\n %s' % (dbname, '\n '.join(lstcols))
dbu.delete_collections(db, lstcols)
logger.debug(msg)
def delete_documents(dbname, colname, doc_ids):
"""Delete documents with _id-s in doc_ids from dbname, colname
"""
#logger.debug('Deleting documents:\n %s' % ('\n '.join(doc_ids)))
client = connect_client()
db, fs = dbu.db_and_fs(client, dbname)
col = collection(db, colname)
#msg = 'Deleted documents from db: %s col: %s' % (dbname, colname)
for s in doc_ids:
oid = ObjectId(s)
doc = dbu.find_doc(col, query={'_id':oid})
if doc is None: continue
#msg += '\n %s and its data' % doc.get('_id', 'N/A')
dbu.del_document_data(doc, fs)
dbu.delete_document_from_collection(col, oid)
#logger.debug(msg)
def insert_document_and_data(dbname, colname, doc, data):
client = connect_client()
db, fs = dbu.db_and_fs(client, dbname)
col = collection(db, colname)
id_data, id_doc = dbu.insert_data_and_doc(data, fs, col, **doc)
return id_data, id_doc
def get_data_for_doc(dbname, doc):
client = connect_client()
db, fs = dbu.db_and_fs(client, dbname)
return dbu.get_data_for_doc(fs, doc)
def collection_info(dbname, colname):
"""Delete collections specified in the dic_db_cols consisting of pairs {dbname:lstcols}
"""
client = connect_client()
return dbu.collection_info(client, dbname, colname)
def list_of_documents(dbname, colname):
client = connect_client()
db = database(client, dbname)
#db, fs = dbu.db_and_fs(client, dbname='cdb-cxi12345')
col = collection(db, colname)
docs = col.find().sort('_id', dbu.DESCENDING)
return [d for d in docs]
def document_info(doc, keys=('time_sec','time_stamp','experiment',\
'detector','ctype','run','id_data_ts','data_type','data_dtype', '_id'),\
fmt='%10s %24s %11s %24s %16s %4s %30s %10s %10s %24s'):
"""The same as dbu.document_info, but with different default parameters (added _id).
"""
return dbu.document_info(doc, keys, fmt)
# EOF
|
the-stack_0_11688 | # Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for data sets.
Represents real or simulated impression log data for multiple publishers.
"""
from collections import defaultdict
from copy import deepcopy
from os import listdir
from os.path import isfile, join
from pathlib import Path
from random import randint
from typing import Dict
from typing import Iterable
from typing import List
from wfa_planning_evaluation_framework.data_generators.publisher_data import (
PublisherData,
)
from wfa_planning_evaluation_framework.models.reach_point import ReachPoint
class DataSet:
"""Real or simulated impression log data for multiple publishers.
A DataSet represents a real or simulated configuration of impression log
data for a collection of related campaigns across multiple publishers.
It represents the basic unit across which modeling strategies are compared.
It is expected that this class will be sub-classed for each of the different
types of publisher overlap models that will be investigate. Thus, we might
have an IndependentDataSet, a SequentiallyCorrelatedDataSet, etc.
"""
def __init__(self, publisher_data_list: Iterable[PublisherData], name: str = None):
"""Constructor
Args:
publisher_data_list: An iterable list of PublisherDatas,
one for each of the publishers that comprise this DataSet.
name: If specified, a human-readable name that will be associated
to this DataSet. For example, it could be an encoding
of the parameters that were used to create this DataSet,
such as "homog_p=10_rep=3". If no name is given, then a random
digit string is assigned as the name.
"""
self._data = deepcopy(publisher_data_list)
total_audience = set()
for pub in self._data:
total_audience.update([id for id, _ in pub._data])
self._maximum_reach = len(total_audience)
if name:
self._name = name
else:
self._name = "{:012d}".format(randint(0, 1e12))
@property
def publisher_count(self):
"""Number of publishers represented in this DataSet."""
return len(self._data)
@property
def maximum_reach(self):
"""Total number of reachable people across all publishers."""
return self._maximum_reach
@property
def name(self):
"""Name of this DataSet."""
return self._name
def spend_by_impressions(self, impressions: Iterable[int]) -> List[float]:
"""Returns spend vector corresponding to a given impression vector.
Args:
impressions: Iterable of hypothetical impression buys, having
one value per publisher.
Returns:
List of corresponding spends. If I is the vector of impressions
and S is the returned vector of spends, then S[k] is the amount
that would need to be spent with the k-th publisher to obtain
I[k] impressions.
"""
return [
self._data[i].spend_by_impressions(impressions[i])
for i in range(len(self._data))
]
def impressions_by_spend(self, spends: Iterable[float]) -> List[int]:
"""Returns impression vector corresponding to a given spend vector.
Args:
spends: Iterable of hypothetical spend amounts, having
one value per publisher.
Returns:
List of corresponding impression counts. If S is the vector of
spends and I is the returned vector of impression counts, then
I[k] is the number of impressions that would be obtained for
a spend of S[k] with publisher k.
"""
return [
self._data[i].impressions_by_spend(spends[i])
for i in range(len(self._data))
]
def reach_by_impressions(
self, impressions: Iterable[int], max_frequency: int = 10
) -> ReachPoint:
"""Number of people reached for a given impression count.
Args:
impressions: A list of impression counts. The length of the list must
equal the value of publisher_count. Specifies the number of impressions
that each publisher will deliver.
max_frequency: int, The maximum frequency that should be counted. All
                frequencies above this amount will be grouped into a single bucket.
Returns:
A ReachPoint object representing the k+ reach for each frequency
in the range 1..max_frequency.
"""
if len(impressions) != self.publisher_count:
raise ValueError(
"Invalid impression vector length. Got {}, expected {}".format(
len(impressions), self.publisher_count
)
)
counts = defaultdict(int)
spends = []
for i, imp in enumerate(impressions):
spends.append(self._data[i].spend_by_impressions(imp))
for id, freq in self._data[i].user_counts_by_impressions(imp).items():
counts[id] += freq
kplus_reaches = self._counts_to_histogram(counts, max_frequency)
return ReachPoint(impressions, kplus_reaches, spends)
def _counts_to_histogram(
self, counts: Dict[int, int], max_frequency: int
) -> List[int]:
"""Constructs k+ reach list from a dictionary of per-id reach counts."""
frequency_counts = [0] * max_frequency
for c in counts.values():
frequency_counts[min(c, max_frequency) - 1] += 1
# At this point, frequency_counts[k] represents the number of people who are
        # reached exactly k+1 times, except that frequency_counts[max_frequency-1] contains
# the number of people reached at least max_frequency times. Now, we convert this
# to a list of k+ reach values.
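        # Worked example: counts {idA: 3, idB: 1} with max_frequency=3 gives
        # frequency_counts [1, 0, 1] above, and [2, 1, 1] after the loop below,
        # i.e. 2 people reached 1+ times, 1 reached 2+ times, 1 reached 3+ times.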
for i in range(max_frequency - 2, -1, -1):
frequency_counts[i] += frequency_counts[i + 1]
return frequency_counts
def reach_by_spend(
self, spends: Iterable[float], max_frequency: int = 10
) -> ReachPoint:
"""Number of people reached for a given spend.
Args:
spends: A list of spend amounts. The length of the list must
equal the value of publisher_count. Specifies the amount spent with
each publisher.
max_frequency: int, The maximum frequency that should be counted. All
                frequencies above this amount will be grouped into a single bucket.
Returns:
A ReachPoint object representing the k+ reach for each frequency
in the range 1..max_frequency.
"""
if len(spends) != self.publisher_count:
raise ValueError(
"Invalid spends vector length. Got {}, expected {}".format(
len(spends), self.publisher_count
)
)
counts = defaultdict(int)
impressions = []
for i, publisher_spend in enumerate(spends):
user_counts = self._data[i].user_counts_by_spend(publisher_spend)
impressions.append(sum(user_counts.values()))
for id, freq in user_counts.items():
counts[id] += freq
kplus_reaches = self._counts_to_histogram(counts, max_frequency)
return ReachPoint(impressions, kplus_reaches, spends)
def write_data_set(self, parent_dir: str, dataset_dir: str = None) -> None:
"""Writes this DataSet object to disk.
Args:
parent_dir: The directory where the DataSet is to be written.
            dataset_dir: The directory name of the DataSet itself. If not
specified, then the name given in the object constructor is
used. If no name was given in the object constructor, then a
random name is used.
"""
if not dataset_dir:
dataset_dir = self._name
fulldir = join(parent_dir, dataset_dir)
Path(fulldir).mkdir(parents=True, exist_ok=True)
for pdf in self._data:
with open(join(fulldir, pdf.name), "w") as file:
pdf.write_publisher_data(file)
file.close()
@classmethod
def read_data_set(cls, dirpath: str) -> "DataSet":
"""Reads a DataSet from disk.
A DataSet is given by a directory containing a collection of files,
each of which represents a PublisherDataSet. The name associated to
the DataSet object is the last component of the dirpath.
Args:
dirpath: Directory containing the PublisherDataSets that comprise
this DataSet.
Returns:
The DataSet object representing the contents of this directory.
"""
pdf_list = []
for f in sorted(listdir(dirpath)):
filepath = join(dirpath, f)
if isfile(filepath):
with open(filepath) as file:
try:
pdf = PublisherData.read_publisher_data(file)
pdf.name = f
pdf_list.append(pdf)
except (ValueError, RuntimeError) as e:
raise RuntimeError(
"In publisher file {}".format(filepath)
) from e
name = dirpath.split("/")[-1]
return cls(pdf_list, name)
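# A minimal usage sketch (hypothetical directory and spend values, adjust to your data):
# dataset = DataSet.read_data_set("data/example_dataset")
# reach_point = dataset.reach_by_spend([1000.0, 2000.0], max_frequency=5)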
|
the-stack_0_11689 | import torch
torch.cuda.manual_seed(3)
torch.manual_seed(3)
import data_handler, tracking_nn
import sys
from torch.optim import Adam
flag = int(sys.argv[1])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Working on", device)
batch_size = 32
cnn = tracking_nn.CNN().to(device)
if flag:
cnn.load_state_dict(torch.load("cnn_model.pt", map_location = device))
for param in cnn.parameters():
param.requires_grad = False
rnn = tracking_nn.RNN().to(device)
model = tracking_nn.Net(device, cnn, rnn).to(device)
paths = ["p11/2.a", "p11/3.a", "p16/3.a", "p17/2.a", "p17/3.a", "p1/2.a", "p18/2.a", "p18/3.a"]
data = data_handler.LegDataLoader(batch_size = batch_size)
# Train the nn
epochs = 1000
patience = 0
learning_rate = 0.0001
grid = 7
optimizer = Adam(model.parameters(), lr = learning_rate)
best_acc = float("Inf")
if flag:
save_path = "model.pt"
else:
save_path = "cnn_model.pt"
def eucl_dist(out, labels):
ret = 0
m = 0
for i in range(out.shape[0]):
yh = out[i]
p1_h = yh[0, :, :]
p2_h = yh[3, :, :]
detect_cell1 = p1_h.reshape(-1).argmax(axis = 0)
detect_cell2 = p2_h.reshape(-1).argmax(axis = 0)
x1, y1 = detect_cell1 // grid, detect_cell1 % grid
x2, y2 = detect_cell2 // grid, detect_cell2 % grid
d1 = (torch.sqrt((x1 + out[i, 1, x1, y1] - labels[i, 0, 0]) ** 2 + (y1 + out[i, 2, x1, y1] - labels[i, 0, 1]) ** 2)).item()
d2 = (torch.sqrt((x2 + out[i, 4, x2, y2] - labels[i, 1, 0]) ** 2 + (y2 + out[i, 5, x2, y2] - labels[i, 1, 1]) ** 2)).item()
if d1 > m:
m = d1
if d2 > m:
m = d2
ret += (d1 + d2) / 2
return m, ret / out.shape[0]
print("Started training...")
for epoch in range(epochs):
running_loss = 0
if epoch == 20 or epoch == 50:
learning_rate *= 0.1
optimizer = Adam(model.parameters(), lr = learning_rate)
f, input, label = data.load(0)
model.init_hidden()
c = 0
while(True):
input, label = input.to(device), label.to(device)
optimizer.zero_grad()
output = model.forward(input)
#print("labels", labels[0])
loss = model.loss(output, label)
loss.backward()
optimizer.step()
running_loss += loss.item() / input.shape[0]
c += 1
if f == -1:
break
if f:
model.init_hidden()
f, input, label = data.load(0)
#model.init_hidden()
model.detach_hidden()
print("epoch:{}, running loss: {}".format(epoch, running_loss / c))
running_loss = 0
if epoch >= patience:
with torch.no_grad():
acc = 0
dist = 0
c = 0
f, input, label = data.load(1)
model.init_hidden()
m = 0
while(True):
input, label = input.to(device), label.to(device)
output = model.forward(input)
acc += model.loss(output, label).item() / input.shape[0]
m1, d = eucl_dist(output, label)
dist += d
if m1 > m:
m = m1
c += 1
if f == -1:
break
if f:
model.init_hidden()
f, input, label = data.load(1)
#model.init_hidden()
if acc < best_acc:
best_acc = acc
print("Saving model with acc:", acc / c, ", mean dist:", dist / c / grid * 100, ", max dist:", m / grid * 100) #mean dist in cm
if flag:
torch.save(model, save_path)
else:
torch.save(cnn.state_dict(), save_path)
|
the-stack_0_11691 | import numpy as np
import multidim
import itertools
import os
import hdbscan
import sys
import time
import pandas as pd
from copy import deepcopy
from matplotlib.patches import Ellipse
from ripser import ripser
from persim import plot_diagrams
from numba import jit, njit, prange
from sklearn import mixture
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import RidgeClassifier
from multidim.covertree import CoverTree
from multidim.models import CDER
import matplotlib.pyplot as plt
np.set_printoptions(precision=2)
sys.path.append('../..')
from ATS import *
# -----------------------------------------------------------------------------
# ------------------------------ IMPORT DATA ----------------------------------
# -----------------------------------------------------------------------------
num_dgms = int(sys.argv[1])
N = num_dgms*6
d = 10
colors = ['red', 'yellow', 'magenta', 'green', 'blue', 'black']
os.system('mkdir cder_images')
os.system('rm -r cder_images/*')
score_train = []
score_test = []
for n in range(10):
X = testSetManifolds(numDgms = num_dgms, numPts = 200, permute = True)
F_labels = X.trainingLabel
labels = X.trainingLabel.unique()
X_dgm0 = X.Dgm0.tolist()
    # We need to perturb H_0 to use CDER.
for h0 in X_dgm0:
h0[:,0] = h0[:,0] + np.random.uniform(-0.05, 0.05, len(h0))
        h0[:,1][h0[:,1]==np.inf] = 10 # Change all inf values in H_0 to 10.
X_dgm1 = X.Dgm1.tolist()
i=0
for l in labels:
F_labels[F_labels == l]=i
i += 1
F = F_labels.tolist()
# -----------------------------------------------------------------------------
# ------------------------------ H_0 ------------------------------------------
# -----------------------------------------------------------------------------
X_train, X_test, F_train, F_test = train_test_split(X_dgm0, F, test_size=0.33, random_state=10)
# -----------------------------------------------------------------------------
# ------------------------------ GMM ------------------------------------------
# -----------------------------------------------------------------------------
print('Begin GMM...')
t0 = time.time()
X_train_temp = np.vstack(X_train)
X_train_temp = X_train_temp[:,1]
X_train_temp = X_train_temp.reshape((-1,1))
gmm_f_train=[]
for i in range(len(X_train)):
gmm_f_train.append(F_train[i]*np.ones(len(X_train[i])))
gmm_f_train = np.concatenate(gmm_f_train)
gmm = mixture.BayesianGaussianMixture(n_components=d, covariance_type='full', max_iter=int(10e4)).fit(X_train_temp, gmm_f_train)
ellipses = []
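    # Describe each Gaussian component as an ellipse: eigen-decompose the covariance to get
    # axis lengths (std) and orientation (rotation); the mixture weight is kept as 'entropy'.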
for i in range(len(gmm.means_)):
L, v = np.linalg.eig(gmm.covariances_[i])
temp = {'mean':gmm.means_[i], 'std':np.sqrt(L), 'rotation':v.transpose(), 'radius':max(np.sqrt(L)), 'entropy':gmm.weights_[i]}
ellipses.append(temp)
t1 = time.time()
print('Finish GMM. Time: {}'.format(t1-t0))
# -----------------------------------------------------------------------------
# ------------------------------ GMM features ---------------------------------
# -----------------------------------------------------------------------------
t0 = time.time()
X_train_temp = [dgm[:,1] for dgm in X_train]
X_train_features_0 = get_all_features(X_train_temp, ellipses, f_gaussian)
X_test_temp = [dgm[:,1] for dgm in X_test]
X_test_features_0 = get_all_features(X_test_temp, ellipses, f_gaussian)
t1 = time.time()
print('Features H_0:{}'.format(t1-t0))
# -----------------------------------------------------------------------------
# ------------------------------ H_1 ------------------------------------------
# -----------------------------------------------------------------------------
X_train, X_test, F_train, F_test = train_test_split(X_dgm1, F, test_size=0.33, random_state=10)
# -----------------------------------------------------------------------------
# ------------------------------ CDER -----------------------------------------
# -----------------------------------------------------------------------------
F_train_cder = F_train.copy()
for l in range(6):
for k, j in enumerate(F_train_cder):
if j == l:
F_train_cder[k] = colors[l]
pc_train = multidim.PointCloud.from_multisample_multilabel(X_train, F_train_cder)
ct_train = CoverTree(pc_train)
cder = CDER(parsimonious=True)
cder.fit(ct_train)
cder_result = cder.gaussians
ellipses = []
for c in cder_result:
temp = {key:c[key] for key in ['mean', 'std', 'rotation', 'radius', 'entropy']}
temp['std'] = 3*temp['std']
ellipses.append(temp)
for i in range(len(X_train)):
dgm = np.array(X_train[i])
plt.scatter(dgm[:,0], dgm[:,1], color='grey')
ellipses_plot_cder = []
for i in range(len(ellipses)):
e = ellipses[i]
ellipses_plot_cder.append(Ellipse(xy=e['mean'], width=e['std'][0], height=e['std'][1], angle=np.arccos(e['rotation'][0,0])))
for e in ellipses_plot_cder:
plt.gca().add_artist(e)
e.set_clip_box(plt.gca().bbox)
e.set_alpha(0.5)
e.set_facecolor([1,0,0])
plt.savefig('cder_images/{}_h1_cder_n_{}.png'.format(n, num_dgms))
plt.close()
# -----------------------------------------------------------------------------
# ------------------------------ CDER features --------------------------------
# -----------------------------------------------------------------------------
X_train_features_1 = get_all_features(X_train, ellipses, f_ellipse)
X_test_features_1 = get_all_features(X_test, ellipses, f_ellipse)
# -----------------------------------------------------------------------------
# ------------------------------ Ridge Classification ------------------------
# -----------------------------------------------------------------------------
X_train_features = np.column_stack((X_train_features_0, X_train_features_1))
X_test_features = np.column_stack((X_test_features_0, X_test_features_1))
ridge_model = RidgeClassifier().fit(X_train_features, F_train)
score_train.append(ridge_model.score(X_train_features, F_train))
score_test.append(ridge_model.score(X_test_features, F_test))
# print('train', score_train[-1])
# print('test', score_test[-1])
print(np.mean(score_train), np.std(score_train))
print(np.mean(score_test), np.std(score_test)) |
the-stack_0_11692 | from os.path import abspath, join, dirname
from sys import path
from envs.keys_and_passwords import *
PROJECT_ROOT = abspath(join(dirname(__file__), "../"))
APPS_DIR = abspath(join(dirname(__file__), "../", "apps"))
path.insert(0, PROJECT_ROOT)
path.insert(0, APPS_DIR)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Steven Skoczen', '[email protected]'),
)
DEFAULT_FROM_EMAIL = "[email protected]"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'project.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': DB_PASSWORD, # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TIME_ZONE = 'America/Vancouver'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
MEDIA_ROOT = join(PROJECT_ROOT, "media_root")
MEDIA_URL = ''
STATIC_ROOT = join(PROJECT_ROOT, "collected_static")
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
AUTH_PROFILE_MODULE = 'my_schools.Person'
FACEBOOK_APP_ID = '400474649994341'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^7!$isr6jd!o+mgl1qy@+8197dm53uhp2i*vp8k4p#*g#8mg1n'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_facebook.auth_backends.FacebookBackend',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
join(abspath(PROJECT_ROOT), "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
"analytical",
"annoying",
"compressor",
"django_extensions",
"django_facebook",
"lettuce.django",
"gunicorn",
"south",
"home",
"schools",
"events",
"my_schools",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
'django_facebook.context_processors.facebook',
)
STATICFILES_EXCLUDED_APPS = []
COMPRESS_ROOT = STATIC_ROOT
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
GOOGLE_ANALYTICS_PROPERTY_ID = "UA-35602695-1"
GAUGES_SITE_ID = ""
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
the-stack_0_11693 | #!/usr/bin/env python3
import argparse
import os
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex_boards.platforms import arty
from ring import *
# CRG ----------------------------------------------------------------------------------------------
class CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
clk = platform.request("clk100")
rst_n = platform.request("cpu_reset")
self.comb += self.cd_sys.clk.eq(clk)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~rst_n)
platform.add_period_constraint(clk, 1e9/100e6)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCMini):
def __init__(self, sys_clk_freq=int(100e6), mode=mode.DOUBLE, **kwargs):
platform = arty.Platform(variant="a7-35", toolchain="vivado")
from litex.build.generic_platform import Pins, IOStandard
platform.add_extension([("do", 0, Pins("B7"), IOStandard("LVCMOS33"))])
SoCMini.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Arty A7-35",
ident_version = True)
self.submodules.crg = CRG(platform, sys_clk_freq)
led = RingControl(platform.request("do"), mode, 12, sys_clk_freq)
self.submodules.ledring = led
self.add_csr("ledring")
self.add_uartbone()
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Arty A7-35")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--mode-single", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--flash", action="store_true", help="Flash Bitstream")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
m = mode.DOUBLE
if args.mode_single:
m = mode.SINGLE
soc = BaseSoC(
sys_clk_freq = 100e6,
mode = m,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
exit()
if __name__ == "__main__":
main()
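# Example invocations (assumed; only the flags defined in the argparse setup above exist):
#   python <this_script>.py --build                 # synthesize and build the bitstream
#   python <this_script>.py --build --mode-single   # build with the single-LED ring mode
#   python <this_script>.py --load                  # load a previously built bitstream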
|
the-stack_0_11694 | import os, sys
import math
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
import argparse
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
# params for prediction engine
parser.add_argument("--use_gpu", type=str2bool, default=True)
# parser.add_argument("--ir_optim", type=str2bool, default=True)
# parser.add_argument("--use_tensorrt", type=str2bool, default=False)
# parser.add_argument("--use_fp16", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=500)
# params for text detector
parser.add_argument("--image_dir", type=str)
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_path", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
    # DB params
parser.add_argument("--det_db_thresh", type=float, default=0.3)
parser.add_argument("--det_db_box_thresh", type=float, default=0.5)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6)
parser.add_argument("--max_batch_size", type=int, default=10)
parser.add_argument("--use_dilation", type=bool, default=False)
parser.add_argument("--det_db_score_mode", type=str, default="fast")
    # EAST params
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
    # SAST params
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_polygon", type=bool, default=False)
# params for text recognizer
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
parser.add_argument("--rec_model_path", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
parser.add_argument("--rec_char_type", type=str, default='ch')
parser.add_argument("--rec_batch_num", type=int, default=6)
parser.add_argument("--max_text_length", type=int, default=25)
parser.add_argument("--use_space_char", type=str2bool, default=True)
parser.add_argument("--drop_score", type=float, default=0.5)
parser.add_argument("--limited_max_width", type=int, default=1280)
parser.add_argument("--limited_min_width", type=int, default=16)
parser.add_argument(
"--vis_font_path", type=str,
default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'doc/fonts/simfang.ttf'))
parser.add_argument(
"--rec_char_dict_path",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'pytorchocr/utils/ppocr_keys_v1.txt'))
# params for text classifier
parser.add_argument("--use_angle_cls", type=str2bool, default=False)
parser.add_argument("--cls_model_path", type=str)
parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
parser.add_argument("--label_list", type=list, default=['0', '180'])
parser.add_argument("--cls_batch_num", type=int, default=6)
parser.add_argument("--cls_thresh", type=float, default=0.9)
parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--use_pdserving", type=str2bool, default=False)
# params for e2e
parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
parser.add_argument("--e2e_model_path", type=str)
parser.add_argument("--e2e_limit_side_len", type=float, default=768)
parser.add_argument("--e2e_limit_type", type=str, default='max')
    # PGNet params
parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
parser.add_argument(
"--e2e_char_dict_path", type=str,
default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'pytorchocr/utils/ic15_dict.txt'))
parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
# params .yaml
parser.add_argument("--det_yaml_path", type=str, default=None)
parser.add_argument("--rec_yaml_path", type=str, default=None)
parser.add_argument("--cls_yaml_path", type=str, default=None)
parser.add_argument("--e2e_yaml_path", type=str, default=None)
return parser.parse_args()
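# Usage sketch (hypothetical entry point and paths; only the flags defined above are real):
#   python predict_system.py --image_dir ./imgs --det_model_path ./models/det.pth \
#       --rec_model_path ./models/rec.pth --use_angle_cls True --cls_model_path ./models/cls.pth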
def get_default_config(args):
return vars(args)
def read_network_config_from_yaml(yaml_path):
if not os.path.exists(yaml_path):
raise FileNotFoundError('{} is not existed.'.format(yaml_path))
import yaml
with open(yaml_path, encoding='utf-8') as f:
res = yaml.safe_load(f)
if res.get('Architecture') is None:
raise ValueError('{} has no Architecture'.format(yaml_path))
return res['Architecture']
def AnalysisConfig(weights_path, yaml_path=None):
if not os.path.exists(os.path.abspath(weights_path)):
raise FileNotFoundError('{} is not found.'.format(weights_path))
if yaml_path is not None:
return read_network_config_from_yaml(yaml_path)
weights_basename = os.path.basename(weights_path)
weights_name = weights_basename.lower()
# supported_weights = ['ch_ptocr_server_v2.0_det_infer.pth',
# 'ch_ptocr_server_v2.0_rec_infer.pth',
# 'ch_ptocr_mobile_v2.0_det_infer.pth',
# 'ch_ptocr_mobile_v2.0_rec_infer.pth',
# 'ch_ptocr_mobile_v2.0_cls_infer.pth',
# ]
# assert weights_name in supported_weights, \
# "supported weights are {} but input weights is {}".format(supported_weights, weights_name)
if weights_name == 'ch_ptocr_server_v2.0_det_infer.pth':
network_config = {'model_type':'det',
'algorithm':'DB',
'Transform':None,
'Backbone':{'name':'ResNet', 'layers':18, 'disable_se':True},
'Neck':{'name':'DBFPN', 'out_channels':256},
'Head':{'name':'DBHead', 'k':50}}
elif weights_name == 'ch_ptocr_server_v2.0_rec_infer.pth':
network_config = {'model_type':'rec',
'algorithm':'CRNN',
'Transform':None,
'Backbone':{'name':'ResNet', 'layers':34},
'Neck':{'name':'SequenceEncoder', 'hidden_size':256, 'encoder_type':'rnn'},
'Head':{'name':'CTCHead', 'fc_decay': 4e-05}}
elif weights_name == 'ch_ptocr_mobile_v2.0_det_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'DB',
'Transform': None,
'Backbone': {'name': 'MobileNetV3', 'model_name': 'large', 'scale': 0.5, 'disable_se': True},
'Neck': {'name': 'DBFPN', 'out_channels': 96},
'Head': {'name': 'DBHead', 'k': 50}}
elif weights_name == 'ch_ptocr_mobile_v2.0_rec_infer.pth':
network_config = {'model_type':'rec',
'algorithm':'CRNN',
'Transform':None,
'Backbone':{'model_name':'small', 'name':'MobileNetV3', 'scale':0.5, 'small_stride':[1,2,2,2]},
'Neck':{'name':'SequenceEncoder', 'hidden_size':48, 'encoder_type':'rnn'},
'Head':{'name':'CTCHead', 'fc_decay': 4e-05}}
elif weights_name == 'ch_ptocr_mobile_v2.0_cls_infer.pth':
network_config = {'model_type':'cls',
'algorithm':'CLS',
'Transform':None,
'Backbone':{'name':'MobileNetV3', 'model_name':'small', 'scale':0.35},
'Neck':None,
'Head':{'name':'ClsHead', 'class_dim':2}}
elif weights_name == 'det_mv3_db_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'DB',
'Transform': None,
'Backbone': {'name': 'MobileNetV3', 'model_name': 'large'},
'Neck': {'name': 'DBFPN', 'out_channels': 256},
'Head': {'name': 'DBHead', 'k': 50}}
elif weights_name == 'det_r50_vd_db_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'DB',
'Transform': None,
'Backbone': {'name': 'ResNet', 'layers': 50},
'Neck': {'name': 'DBFPN', 'out_channels': 256},
'Head': {'name': 'DBHead', 'k': 50}}
elif weights_name == 'det_mv3_east_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'EAST',
'Transform': None,
'Backbone': {'name': 'MobileNetV3', 'model_name': 'large'},
'Neck': {'name': 'EASTFPN', 'model_name': 'small'},
'Head': {'name': 'EASTHead', 'model_name': 'small'}}
elif weights_name == 'det_r50_vd_east_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'EAST',
'Transform': None,
'Backbone': {'name': 'ResNet', 'layers': 50},
'Neck': {'name': 'EASTFPN', 'model_name': 'large'},
'Head': {'name': 'EASTHead', 'model_name': 'large'}}
elif weights_name == 'det_r50_vd_sast_icdar15_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'SAST',
'Transform': None,
'Backbone': {'name': 'ResNet_SAST', 'layers': 50},
'Neck': {'name': 'SASTFPN', 'with_cab': True},
'Head': {'name': 'SASTHead'}}
elif weights_name == 'det_r50_vd_sast_totaltext_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'SAST',
'Transform': None,
'Backbone': {'name': 'ResNet_SAST', 'layers': 50},
'Neck': {'name': 'SASTFPN', 'with_cab': True},
'Head': {'name': 'SASTHead'}}
elif weights_name == 'en_server_pgneta_infer.pth':
network_config = {'model_type': 'e2e',
'algorithm': 'PGNet',
'Transform': None,
'Backbone': {'name': 'ResNet', 'layers': 50},
'Neck': {'name': 'PGFPN'},
'Head': {'name': 'PGHead'}}
else:
network_config = {'model_type': 'rec',
'algorithm': 'CRNN',
'Transform': None,
'Backbone': {'model_name': 'small', 'name': 'MobileNetV3', 'scale': 0.5,
'small_stride': [1, 2, 2, 2]},
'Neck': {'name': 'SequenceEncoder', 'hidden_size': 48, 'encoder_type': 'rnn'},
'Head': {'name': 'CTCHead', 'fc_decay': 4e-05}}
# raise NotImplementedError
return network_config
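# Usage sketch (hypothetical paths; the weights file must exist on disk, and an explicit
# yaml_path overrides the name-based lookup above):
#   det_cfg = AnalysisConfig('./models/ch_ptocr_mobile_v2.0_det_infer.pth')
#   rec_cfg = AnalysisConfig('./models/my_rec.pth', yaml_path='./configs/my_rec.yml')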
def draw_e2e_res(dt_boxes, strs, img_path):
src_im = cv2.imread(img_path)
for box, str in zip(dt_boxes, strs):
box = box.astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
cv2.putText(
src_im,
str,
org=(int(box[0, 0, 0]), int(box[0, 0, 1])),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=0.7,
color=(0, 255, 0),
thickness=1)
return src_im
def draw_text_det_res(dt_boxes, img_path):
src_im = cv2.imread(img_path)
for box in dt_boxes:
box = np.array(box).astype(np.int32).reshape(-1, 2)
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
return src_im
def resize_img(img, input_size=600):
"""
resize img and limit the longest side of the image to input_size
"""
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img
def draw_ocr_box_txt(image,
boxes,
txts,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
h, w = image.height, image.width
img_left = image.copy()
img_right = Image.new('RGB', (w, h), (255, 255, 255))
import random
random.seed(0)
draw_left = ImageDraw.Draw(img_left)
draw_right = ImageDraw.Draw(img_right)
for idx, (box, txt) in enumerate(zip(boxes, txts)):
if scores is not None and scores[idx] < drop_score:
continue
color = (random.randint(0, 255), random.randint(0, 255),
random.randint(0, 255))
draw_left.polygon(box, fill=color)
draw_right.polygon(
[
box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
box[2][1], box[3][0], box[3][1]
],
outline=color)
box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][
1])**2)
box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][
1])**2)
if box_height > 2 * box_width:
font_size = max(int(box_width * 0.9), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
cur_y = box[0][1]
for c in txt:
char_size = font.getsize(c)
draw_right.text(
(box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
cur_y += char_size[1]
else:
font_size = max(int(box_height * 0.8), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw_right.text(
[box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
img_left = Image.blend(image, img_left, 0.5)
img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
img_show.paste(img_left, (0, 0, w, h))
img_show.paste(img_right, (w, 0, w * 2, h))
return np.array(img_show)
def str_count(s):
"""
    Count the effective length of a string: each Chinese character
    counts as one unit, while a single English character, digit or
    space counts as half a unit.
    args:
        s(string): the input string
    return(int):
        the effective character count
"""
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if c in string.ascii_letters or c.isdigit() or c.isspace():
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return s_len - math.ceil(en_dg_count / 2)
def text_visual(texts,
scores,
img_h=400,
img_w=600,
threshold=0.,
font_path="./doc/simfang.ttf"):
"""
create new blank img and draw txt on it
args:
texts(list): the text will be draw
scores(list|None): corresponding score of each txt
img_h(int): the height of blank img
img_w(int): the width of blank img
font_path: the path of font which is used to draw text
return(array):
"""
if scores is not None:
assert len(texts) == len(
scores), "The number of txts and corresponding scores must match"
def create_blank_img():
blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255
blank_img[:, img_w - 1:] = 0
blank_img = Image.fromarray(blank_img).convert("RGB")
draw_txt = ImageDraw.Draw(blank_img)
return blank_img, draw_txt
blank_img, draw_txt = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
gap = font_size + 5
txt_img_list = []
count, index = 1, 0
for idx, txt in enumerate(texts):
index += 1
if scores[idx] < threshold or math.isnan(scores[idx]):
index -= 1
continue
first_line = True
while str_count(txt) >= img_w // font_size - 4:
tmp = txt
txt = tmp[:img_w // font_size - 4]
if first_line:
new_txt = str(index) + ': ' + txt
first_line = False
else:
new_txt = ' ' + txt
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
txt = tmp[img_w // font_size - 4:]
if count >= img_h // gap - 1:
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
else:
new_txt = " " + txt + " " + '%.3f' % (scores[idx])
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
# whether add new blank img or not
if count >= img_h // gap - 1 and idx + 1 < len(texts):
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if len(txt_img_list) == 1:
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img)
def base64_to_cv2(b64str):
import base64
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def draw_boxes(image, boxes, scores=None, drop_score=0.5):
if scores is None:
scores = [1] * len(boxes)
for (box, score) in zip(boxes, scores):
if score < drop_score:
continue
box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
return image |
the-stack_0_11695 | """
Generic setup of the data sources and the model training.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
and also on
https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
"""
import logging
# Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, Callback
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Scipy
from scipy.stats import pearsonr
# Sklearn
from sklearn.model_selection import train_test_split
from GA.utils.utils import clean_data
# Helper: Early stopping.
early_stopper = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=2, verbose=0, mode='auto')
# In case that your training loss is not dropping - which means you are learning nothing after each epoch.
# It look like there's nothing to learn in this model, aside from some trivial linear-like fit or cutoff value.
def compile_model_mlp(geneparam, input_shape):
"""Compile a sequential model.
Args:
geneparam (dict): the parameters of the network
Returns:
a compiled network.
"""
# Get our network parameters.
nb_layers = geneparam['nb_layers']
nb_neurons = geneparam['nb_neurons']
activation = geneparam['activation']
optimizer = geneparam['optimizer']
dropout = geneparam['dropout']
weight_decay = geneparam['weight_decay']
print("Architecture:%d,%s,%s,%d,%.2f%%,%.2f%%" % (nb_neurons, activation, optimizer,
nb_layers, dropout,weight_decay))
logging.info("Architecture:%d,%s,%s,%d,%.2f%%,%.2f%%" % (nb_neurons, activation, optimizer,
nb_layers, dropout, weight_decay))
model = Sequential()
# Add each layer.
for i in range(nb_layers):
# Need input shape for first layer.
if i == 0:
if weight_decay>0:
model.add(Dense(nb_neurons, activation=activation, input_dim=input_shape,
kernel_regularizer=l2(weight_decay)))
else:
model.add(Dense(nb_neurons, activation=activation, input_dim=input_shape))
else:
if weight_decay > 0:
model.add(Dense(nb_neurons, activation=activation, kernel_regularizer=l2(weight_decay)))
else:
model.add(Dense(nb_neurons, activation=activation))
if dropout > 0:
model.add(Dropout(dropout)) # dropout for each layer
# Output layer.
model.add(Dense(1))
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae'])
return model
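# Usage sketch (hypothetical gene parameters; input_shape is the number of input features/markers):
#   example_gene = {'nb_layers': 2, 'nb_neurons': 64, 'activation': 'relu',
#                   'optimizer': 'adam', 'dropout': 0.2, 'weight_decay': 1e-4}
#   model = compile_model_mlp(example_gene, input_shape=500)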
def compile_model_cnn(geneparam, nb_classes, input_shape):
"""Compile a sequential model.
Args:
geneparam (dict): the parameters of the genome
Returns:
a compiled network.
"""
# Get our network parameters.
nb_layers = geneparam['nb_layers']
nb_neurons = geneparam['nb_neurons']
activation = geneparam['activation']
optimizer = geneparam['optimizer']
logging.info("Architecture:%d,%s,%s,%d" % (nb_neurons, activation, optimizer, nb_layers))
model = Sequential()
# Add each layer.
for i in range(0, nb_layers):
# Need input shape for first layer.
if i == 0:
model.add(
Conv2D(nb_neurons, kernel_size=(3, 3), activation=activation, padding='same', input_shape=input_shape))
else:
model.add(Conv2D(nb_neurons, kernel_size=(3, 3), activation=activation))
if i < 2: # otherwise we hit zero
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(nb_neurons, activation=activation))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
# BAYESIAN CONVOLUTIONAL NEURAL NETWORKS WITH BERNOULLI APPROXIMATE VARIATIONAL INFERENCE
# need to read this paper
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def get_data(dataset):
markers, pheno = clean_data(dataset.trait, dataset.k)
x_train, x_test, y_train, y_test = train_test_split(markers, pheno, test_size=0.33, random_state=42)
return markers.shape[1], x_train, x_test, y_train, y_test
def train_and_score(geneparam, dataset):
"""Train the model, return test loss.
Args:
geneparam (dict): the parameters of the network
dataset (str): Dataset to use for training/evaluating
"""
logging.info("Getting datasets")
input_shape, x_train, x_test, y_train, y_test = get_data(dataset)
logging.info("Compling Keras model")
model = compile_model_mlp(geneparam, input_shape)
history = LossHistory()
model.fit(x_train, y_train,
epochs=1200,
# using early stopping so no real limit - don't want to waste time on horrible architectures
verbose=1,
validation_data =(x_test, y_test),
# callbacks=[history])
callbacks=[early_stopper])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test mse:', score[0])
print('Test mae:', score[1])
r = pearsonr(model.predict(x_test).ravel(), y_test)[0]
print('Test r:', r)
logging.info("R: %.3f" % r)
K.clear_session()
# we do not care about keeping any of this in memory -
# we just need to know the final scores and the architecture
    if r != r:  # r != r is only true when r is NaN (e.g. pearsonr on constant predictions)
r = -1.0
return r |
the-stack_0_11696 | # Copyright 2019 Christo Kirov. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for parsing PTB text files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import tensorflow as tf
def _read_words(filename):
print('Reading %s...' % (filename), file=sys.stderr)
with tf.io.gfile.GFile(filename, "r") as f:
return f.read().strip().replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
data += _read_words(filename + ".out")
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
id_to_word = dict(zip(range(len(words)), words))
# Save the vocab to a file, ordered according to the index.
with open('labels.txt', 'w', encoding='utf-8') as outfile:
for w in words:
outfile.write(w + "\n")
return word_to_id
def _file_to_word_ids(filename, word_to_id):
print('Converting %s to IDs' % (filename), file=sys.stderr)
data_in = _read_words(filename)
data_out = _read_words(filename + ".out")
ids_in = [word_to_id[word] for word in data_in if word in word_to_id]
ids_out = [word_to_id[word] for word in data_out if word in word_to_id]
print(' ', len(ids_in),ids_in[-1],'|', len(ids_out), ids_out[-1], file=sys.stderr)
assert(len(ids_in) == len(ids_out))
return [(x,y) for x, y in zip(ids_in, ids_out)]
def lm_raw_data(data_path=None):
"""Load LM raw data from data directory "data_path".
Reads LM text files, converts strings to integer ids,
and performs mini-batching of the inputs.
Args:
    data_path: string path to the directory containing lm.train.txt,
      lm.valid.txt and lm.test.txt (plus the matching .out files).
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "lm.train.txt")
valid_path = os.path.join(data_path, "lm.valid.txt")
test_path = os.path.join(data_path, "lm.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
return train_data, valid_data, test_data, vocabulary
def lm_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
    raw_data: one of the raw data outputs from lm_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "LMProducer", [raw_data, batch_size, num_steps]):
raw_data_in = [cp[0] for cp in raw_data]
raw_data_out = [cp[1] for cp in raw_data]
raw_data_in = tf.convert_to_tensor(raw_data_in, name="raw_data", dtype=tf.int32)
raw_data_out = tf.convert_to_tensor(raw_data_out, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data_in)
batch_len = data_len // batch_size
data_in = tf.reshape(raw_data_in[0 : batch_size * batch_len],
[batch_size, batch_len])
data_out = tf.reshape(raw_data_out[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.compat.v1.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.strided_slice(data_in, [0, i * num_steps],
[batch_size, (i + 1) * num_steps])
x.set_shape([batch_size, num_steps])
y = tf.strided_slice(data_out, [0, i * num_steps + 1],
[batch_size, (i + 1) * num_steps + 1])
y.set_shape([batch_size, num_steps])
return x, y
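# Usage sketch (assumes data_path holds lm.train.txt / lm.valid.txt / lm.test.txt plus the
# matching *.out files read by lm_raw_data above):
#   train_data, valid_data, test_data, vocab_size = lm_raw_data('./data')
#   x, y = lm_producer(train_data, batch_size=20, num_steps=35)
#   # x and y are [batch_size, num_steps] int32 tensors fed by a queue runner, so they must be
#   # consumed in a session with tf.train.start_queue_runners (or a Supervisor/MonitoredSession).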
|
the-stack_0_11697 | import tensorflow as tf
class GAINGenerator(object):
"""This is class to impute missing value with proper values from
observed data and missing mask (0/1 flags indicating missing value)
"""
def __init__(self):
pass
def generate(self, x, m, z, drop):
"""Generate candidate values to be imputated.
Parameters
----------
x : tf.Tensor of tf.float32
data frame which is target of missing value imputation.
m : tf.Tensor of tf.bool
mask data indicating missing positions in x.
(if True, observed ; same size as x)
z : tf.Tensor of tf.float32
data frame each cell of which has random numbers
to generate imputed values (same size as x)
Returns
-------
xbar : tf.Tensor of tf.float32
generated data frame which has candidate values
(even in observed cell)
"""
assert x.shape.as_list() == m.shape.as_list() == z.shape.as_list()
assert x.dtype == z.dtype == tf.float32
assert m.dtype == tf.bool
mf = tf.cast(m, dtype=tf.float32, name="mask_float")
out = tf.concat([x, mf, z], axis=1, name="concat")
d = x.shape[1]
out = tf.layers.dense(out, d, activation=tf.tanh, name="dense1")
out = tf.layers.dropout(out, drop)
        out = tf.layers.dense(out, int(d) // 2, activation=tf.tanh, name="dense2")
out = tf.layers.dropout(out, drop)
out = tf.layers.dense(out, d, activation=tf.sigmoid, name="dense3")
xbar = out
return xbar
def impute(self, x, xbar, m):
"""Do missing value imputation. This method uses candidate
values in xbar (which is generated by generate method)
Parameters
----------
x : tf.Tensor of tf.float32
data frame which is target of missing value imputation.
xbar : tf.Tensor of tf.float32
data frame which is result of generate method.
all of missing value of x are imputed by candidate values.
(same size as x)
m : tf.Tensor of tf.bool
mask data indicating missing positions (if True, observed)
Returns
-------
xhat : tf.Tensor of tf.float32
result of missing value imputation of x.
"""
assert x.shape.as_list() == xbar.shape.as_list() == m.shape.as_list()
assert x.dtype == xbar.dtype == tf.float32
assert m.dtype == tf.bool
xhat = tf.where(m, x, xbar)
return xhat
def adversarial_loss(self, mhat, m, b):
"""Calculate adversarial loss. This method compares
actual missing mask from output of discriminator, and
uses hint (b).
Parameters
----------
mhat : tf.Tensor of tf.float32
A prediction result of missing mask of discriminator.
It contains probability whether it is observed.
m : tf.Tensor of tf.bool
actual missing mask (same size as mhat)
b : tf.Tensor of tf.bool
Hint flag data
each row has only one True, which is selected at random.
The other cells are False (same size as mhat)
Returns
-------
loss : tf.Tensor of tf.float32 (no dimension)
adversarial loss calculated
"""
assert mhat.shape.as_list() == m.shape.as_list() == b.shape.as_list()
assert mhat.dtype == tf.float32
assert m.dtype == b.dtype == tf.bool
eps = 1e-7
log_loss = - tf.where(m, tf.zeros_like(m, dtype=tf.float32),
tf.log(mhat + eps))
loss = tf.reduce_sum(tf.where(b, log_loss,
tf.zeros_like(b, dtype=tf.float32)))
return loss
def generate_loss(self, x, xbar, m):
"""Calculate generate loss.
The more x is similar to xbar, the less loss is.
Parameters
----------
x : tf.Tensor of tf.float32
data frame which is target of missing value imputation.
xbar : tf.Tensor of tf.float32
data frame which is result of generate method.
all of missing value of x are imputed by candidate values.
(same size as x)
m : tf.Tensor of tf.bool
            mask data indicating missing positions in x
            (if True, observed ; same size as x)
Returns
-------
loss : tf.Tensor of tf.float32 (no dimension)
generate loss calculated
"""
assert x.shape.as_list() == xbar.shape.as_list() == m.shape.as_list()
assert x.dtype == xbar.dtype == tf.float32
assert m.dtype == tf.bool
mse = tf.square(x - xbar)
loss = tf.reduce_sum(tf.where(m, mse,
tf.zeros_like(m, dtype=tf.float32)))
return loss
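if __name__ == '__main__':
    # Minimal wiring sketch (not part of the original module; shapes are assumptions).
    # x holds the data (missing cells may contain anything), m marks observed cells and
    # z is the noise consumed by the generator.
    n, d = 32, 8
    x = tf.placeholder(tf.float32, [n, d])
    m = tf.placeholder(tf.bool, [n, d])
    z = tf.placeholder(tf.float32, [n, d])
    drop = tf.placeholder_with_default(0.0, [])

    gen = GAINGenerator()
    xbar = gen.generate(x, m, z, drop)      # candidate values for every cell
    xhat = gen.impute(x, xbar, m)           # observed cells kept, missing cells imputed
    g_loss = gen.generate_loss(x, xbar, m)  # reconstruction loss on observed cells
    # adversarial_loss(mhat, m, b) additionally needs mhat, the discriminator's per-cell
    # probability of being observed, and a hint mask b; neither is defined in this module.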
|
the-stack_0_11698 | import unittest2
import os
import tempfile
import shutil
import pip
from hoplite.client.status_updater import MockStatusUpdater
from hoplite.builtin_plugins.constants import InstallPythonPackageJobConstants as KEYS
from hoplite.builtin_plugins import install_python_package_job
from httmock import urlmatch, response, HTTMock
# Monkey patch pip so it doesn't mess with logging. Otherwise, presence of nose xunit logging handlers will cause an
# error when pip tries to set logging things
def blank_func(blank_arg):
pass
import pip.basecommand
pip.basecommand.__dict__['logging_dictConfig'] = blank_func
@urlmatch(path='/reload$')
def reload_site_packages(url, request):
return response(200)
class TestInstallPythonPackage(unittest2.TestCase):
def test_install_from_local_path(self):
setup_str = "from setuptools import setup, find_packages;setup(name='poopy', version='0.1', packages=find_packages())"
tempdir = tempfile.mkdtemp()
try:
setup_py = open(os.path.join(tempdir, "setup.py"), 'w')
setup_py.write(setup_str)
setup_py.close()
package_path = os.path.join(tempdir, "poopy")
os.mkdir(package_path)
init_file = open(os.path.join(package_path, "__init__.py"), 'w')
init_file.close()
config = {KEYS.LOCAL_PATH: tempdir}
status = MockStatusUpdater()
with HTTMock(reload_site_packages):
install_python_package_job.run(config, status)
self.assertTrue(status.status["succeeded"])
try:
import poopy
except ImportError:
self.fail("Could not import installed package")
finally:
pip.main(['uninstall', '-y', "poopy"])
shutil.rmtree(tempdir)
def test_install_fails_success_false_stdout_info(self):
setup_str = "raise ValueError('I FAILED!')"
tempdir = tempfile.mkdtemp()
try:
setup_py = open(os.path.join(tempdir, "setup.py"), 'w')
setup_py.write(setup_str)
setup_py.close()
config = {KEYS.LOCAL_PATH: tempdir}
status = MockStatusUpdater()
install_python_package_job.run(config, status)
self.assertFalse(status.status["succeeded"])
# Because we monkey patch pip so it doesn't mess up nose xunit logging, the traceback info goes to the
# console rather than to status.status.stdout
#self.assertRegexpMatches(status.status["stdout"], "Traceback")
self.assertIn("Pip returned a non-zero error code", status.status["errors"])
finally:
shutil.rmtree(tempdir)
def test_missing_local_path_returns_errors(self):
config = {}
status = MockStatusUpdater()
install_python_package_job.run(config, status)
self.assertFalse(status.status["succeeded"])
self.assertIn("No local path specified", status.status["errors"]) |
the-stack_0_11701 | from decimal import Decimal as D
from oscar.core.loading import get_class
from oscar.test import factories
Default = get_class('partner.strategy', 'Default')
def add_product(basket, price=None, quantity=1, product=None):
"""
Helper to add a product to the basket.
"""
has_strategy = False
try:
has_strategy = hasattr(basket, 'strategy')
except RuntimeError:
pass
if not has_strategy:
basket.strategy = Default()
if price is None:
price = D('1')
if product and product.has_stockrecords:
record = product.stockrecords.all()[0]
else:
record = factories.create_stockrecord(
product=product, price_excl_tax=price,
num_in_stock=quantity + 1)
basket.add_product(record.product, quantity)
def add_products(basket, args):
"""
Helper to add a series of products to the passed basket
"""
for price, quantity in args:
add_product(basket, price, quantity)
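# Usage sketch in a test (assumes a Django/Oscar test environment where the factories work):
#   basket = factories.create_basket(empty=True)
#   add_product(basket, D('9.99'), quantity=2)
#   add_products(basket, [(D('5.00'), 1), (D('2.50'), 4)])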
|
the-stack_0_11702 | import pytest
import spacy
from spacy.language import Language
from timexy.languages.en import en
label = "timexy_label"
lang = "en"
@pytest.fixture()
def nlp() -> Language:
nlp = spacy.blank(lang)
nlp.add_pipe("timexy", config={"label": label})
return nlp
test_data = [t for rule in en.rules for t in rule.tests]
@pytest.mark.parametrize("text,date_start,date_end", test_data)
def test_rule(nlp: Language, text: str, date_start: int, date_end: int) -> None:
doc = nlp(text)
assert [
e
for e in doc.ents
if e.start_char == date_start and e.end_char == date_end and e.label_ == label
]
|
the-stack_0_11703 | # Tests run against GitHub go here.
# Import the required libraries.
from github import Github
from github.GithubException import UnknownObjectException
import aux_functions as aux
import dataF_functions as d
import ci_tools as ci
import github_search as ghs
# import openpyxl --> this has to be installed in the venv for generarEXCEL to work.
# Generate a github_token to query the GitHub API through the library.
user = "jorcontrerasp"
token = aux.readFile("tokens/github_token.txt")
g = Github(user, token)
ciTool = ci.HerramientasCI.CI2
# zhihu/Matisse
# EOSIO/eos
# AMAI-GmbH/AI-Expert-Roadmap
# jwasham/coding-interview-university
# gztchan/awesome-design
# agalwood/motrix
# kr328/clashforandroid
# salomonelli/best-resume-ever
# facebook/create-react-app
# goldbergyoni/nodebestpractices
# freecad/freecad
# vuejs/core
# artf/grapesjs
# cocoapods/cocoapods
# google/gvisor
# adguardteam/adguardhome
# playframework/playframework
# hackiftekhar/IQKeyboardManager -> matrix label?
# neovim/neovim -> in the jobs, do I collect the 'env' key as steps?
repoName = "neovim/neovim"
doTest = True
doSearchInAllCiTools = True
try:
repo = g.get_repo(repoName)
except UnknownObjectException as e:
print("El repositorio " + repoName + " no existe en GitHub: " + str(e))
doTest = False
if doTest:
filteredRepos = [repo]
df = d.makeDataFrame(filteredRepos, True)
df2 = d.makeCounterDataFrame()
df3 = d.makeEmptyLanguageDataFrame()
df6 = d.makeEmptyStageStatisticsDataFrame()
if doSearchInAllCiTools:
foundList = []
foundList = ghs.searchReposGitHubApi(filteredRepos, df, df2, df3, df6)
d.makeEXCEL(df2, "_counting")
else:
found,df,df3,df6 = ghs.searchLiteralPathFromRoot(repo, ciTool, df, df2, df3, df6)
#found,df,df3,df6 = ghs.searchLiteralPathFromRoot_REC(repo, ciTool, [], df, df2, df3, df6, [])
df,df2,df4,df5 = d.doAuxWithResultsDF(df, df2, df3, True)
d.makeEXCEL(df, "github/_github_results")
d.makeEXCEL(df2, "github/_counting")
d.makeEXCEL(df3, "github/_github_languages")
d.makeEXCEL(df4, "github/_github_language_statistics")
d.makeEXCEL(df5, "github/_github_ci_statistics")
d.makeEXCEL(df6, "github/_gitlab_stage_statistics")
print("Fin de la prueba.") |
the-stack_0_11704 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import io
import select
import socket
def safe_select(*args, **kwargs):
# N.B. This while loop is purely to facilitate SA_RESTART-like behavior for select(), which is
# (apparently) not covered by signal.siginterrupt(signal.SIGINT, False) when a timeout is passed.
# This helps avoid an unhandled select.error(4, 'Interrupted system call') on SIGINT.
# See https://bugs.python.org/issue12224 for more info.
while 1:
try:
return select.select(*args, **kwargs)
except select.error as e:
if e[0] != errno.EINTR:
raise
class RecvBufferedSocket(object):
"""A socket wrapper that simplifies recv() buffering."""
def __init__(self, socket, chunk_size=io.DEFAULT_BUFFER_SIZE, select_timeout=None):
"""
:param socket socket: The socket.socket object to wrap.
:param int chunk_size: The smallest max read size for calls to recv() in bytes.
:param float select_timeout: The select timeout for a socket read in seconds. An integer value
effectively makes self.recv non-blocking (default: None, blocking).
"""
self._socket = socket
self._chunk_size = chunk_size
self._select_timeout = select_timeout
self._buffer = b''
def recv(self, bufsize):
"""Buffers up to _chunk_size bytes when the internal buffer has less than `bufsize` bytes."""
assert bufsize > 0, 'a positive bufsize is required'
if len(self._buffer) < bufsize:
readable, _, _ = safe_select([self._socket], [], [], self._select_timeout)
if readable:
recvd = self._socket.recv(max(self._chunk_size, bufsize))
self._buffer = self._buffer + recvd
return_buf, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return return_buf
def __getattr__(self, attr):
return getattr(self._socket, attr)
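if __name__ == '__main__':
  # Minimal local demo (not part of the original module): wrap one end of a socketpair and
  # read it back in fixed-size chunks through the buffering wrapper. Unix-only (socketpair).
  left, right = socket.socketpair()
  buffered = RecvBufferedSocket(right, chunk_size=16)
  left.sendall(b'hello world')
  print(buffered.recv(5))   # b'hello'
  print(buffered.recv(6))   # b' world'
  left.close()
  right.close()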
|
the-stack_0_11705 | from django.test import TestCase
from .models import Item
class TestModels(TestCase):
def test_new_item_defaults_to_done_false(self):
item = Item.objects.create(name="Test DoneFalse Item")
self.assertFalse(item.done)
def test_item_string_method_returns_name(self):
item_name = "Test Item StringMethod"
item = Item.objects.create(name=item_name)
self.assertEqual(str(item), item_name) |