ext | sha | content
---|---|---|
py | b405ecd5e305bde38a974663b2c1ad9fa928da90 | import pandas as pd
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
np.random.seed(0)
# Read data from Titanic dataset.
titanic_url = ('https://raw.githubusercontent.com/amueller/'
'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')
data = pd.read_csv(titanic_url)
# We will train our classifier with the following features:
# Numeric Features:
# - age: float.
# - fare: float.
# Categorical Features:
# - embarked: categories encoded as strings {'C', 'S', 'Q'}.
# - sex: categories encoded as strings {'female', 'male'}.
# - pclass: ordinal integers {1, 2, 3}.
# We create the preprocessing pipelines for both numeric and categorical data.
numeric_features = ['age', 'fare']
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['embarked', 'sex', 'pclass']
# Replace missing values with the mode (most frequent value), then one-hot encode.
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
# Final preprocessor object set up with ColumnTransformer...
preprocess = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
X = data.drop(['survived', 'name'], axis=1)
y = data['survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
preprocess = preprocess.fit(X_train)
def preprocessor(data):
import sklearn
preprocessed_data=preprocess.transform(data)
return preprocessed_data
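# LogisticRegression and GridSearchCV are imported above but never used in this
# snippet. A minimal illustrative sketch of how they would typically be attached
# to the same preprocessing step (hypothetical parameter grid):
#
# clf = Pipeline(steps=[('preprocess', preprocess),
#                       ('classifier', LogisticRegression())])
# param_grid = {'classifier__C': [0.1, 1.0, 10.0]}
# grid_search = GridSearchCV(clf, param_grid, cv=5)
# grid_search.fit(X_train, y_train)
# print(grid_search.score(X_test, y_test))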
|
py | b405ecf3c933fa49cb6e9df5051c715859fae114 | # Copyright 2021, Autonomous Space Robotics Lab (ASRL)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import rclpy
from geometry_msgs.msg import Pose2D
from vtr_messages.msg import GraphPin
from vtr_messages.srv import GraphRelaxation, GraphCalibration, GraphPinning
# A thread lock for ROS to avoid synchronization issues
ros_rlock = threading.RLock()
def ros_service_request(node, path, mtype, request):
ros_service = node.create_client(mtype, path)
while not ros_service.wait_for_service(timeout_sec=1.0):
node.get_logger().info('service not available, waiting again...')
with ros_rlock: # (yuchen) isn't this equivalent to call(request)?
response = ros_service.call_async(request)
rclpy.spin_until_future_complete(node, response)
return response.result()
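# Note: call_async() followed by spin_until_future_complete() is roughly what a
# blocking call() would do, but call() requires another executor to be spinning
# this node in order to deliver the response, while spinning on the future here
# keeps the helper usable from a plain thread.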
def get_graph(node, seq):
"""Get the relaxed pose graph from the map server"""
request = GraphRelaxation.Request()
request.seq = int(seq)
request.update_graph = False # TODO not used?
request.project = True # TODO always True?
return ros_service_request(node, "relaxed_graph", GraphRelaxation, request)
def move_graph(node, x, y, theta, scale):
"""Update lat lng of the pose graph shown on map"""
request = GraphCalibration.Request()
request.t_delta = Pose2D(x=x, y=y, theta=theta)
request.scale_delta = scale
return ros_service_request(node, "update_calib", GraphCalibration, request)
def pin_graph(node, pins):
"""Add vertex to latlng correspondence pins"""
request = GraphPinning.Request()
for pin in pins:
pin_msg = GraphPin()
pin_msg.id = int(pin["id"])
pin_msg.lat = float(pin["latLng"]["lat"])
pin_msg.lng = float(pin["latLng"]["lng"])
pin_msg.weight = float(pin["weight"])
request.pins.append(pin_msg)
return ros_service_request(node, "pin_graph", GraphPinning, request) |
py | b405ecf3ea1cbb2972db3f724a7787412514677a | """Code examples for using the Ouster SDK.
These modules are provided for documentation and testing purposes only, and
should not be considered a stable public API.
"""
|
py | b405ee6a0a81bcae62b16a96142827a1b7ab7983 | #!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response
# import camera driver
if os.environ.get('CAMERA'):
Camera = import_module('base.camera_' + os.environ['CAMERA']).Camera
else:
from base.camera import Camera
app = Flask(__name__)
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
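# gen() expects the imported Camera class to expose get_frame(), returning one
# JPEG-encoded frame as bytes. A hypothetical, illustrative sketch of that
# interface:
#
# class Camera:
#     def get_frame(self):
#         with open('frame.jpg', 'rb') as f:  # placeholder frame source
#             return f.read()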
|
py | b405eebdb29606a696feb9f4c604fff9bdaaa090 | class Layout(object):
def __init__(self):
self.components = []
self.width = 128 # epd2in13.EPD_WIDTH
self.height = 250 # epd2in13.EPD_HEIGHT
def add(self, component):
if isinstance(component, (list,)):
self.components.extend( component )
else:
self.components.append( component )
def round_to(self, value, res):
"""
Round to e.g., 0.5, 0.02, 10, etc.
"""
if res == 0:
return round(value)
return res * (round(value/res))
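# Illustrative examples: round_to(3.27, 0.5) -> 3.5, round_to(123, 10) -> 120,
# and round_to(3.27, 0) -> 3 (falls back to plain round()).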
|
py | b405efe270a03d44a0e5e4080b1a8befa070f6d9 | #!/usr/bin/env python
# encoding: utf-8
from scipy.io import loadmat
from scipy.signal import freqresp
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
plt.close('all')
f = loadmat('data/tf.mat')['f'][0, :]
rawdata = loadmat('data/tf.mat')['h0'][0, :]
A,B,C,D = np.load('data/ss_param.npy')
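# freqresp evaluates the state-space model's frequency response,
# H(jw) = C*(jw*I - A)^-1*B + D, at the angular frequencies w = 2*pi*f.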
_, Hfit = freqresp((A,B,C,D), 2*np.pi*f)
plt.figure(figsize=(4,3))
plt.plot(f, abs(rawdata), '-b', label='Data')
plt.plot(f, abs(Hfit), '-r', label='Fitted data')
plt.ylabel('Amplitude [in mm/A]')
plt.xlabel('Frequency [in Hz]')
plt.yscale('log')
plt.xscale('log')
plt.grid(which='both')
plt.ylim([min(abs(rawdata)),11])
plt.xlim([0.3,f[-1]])
plt.legend(loc='best', frameon=True, fancybox=True)
sns.despine()
plt.tight_layout()
plt.savefig('ctl_id_amplitude.pdf')
plt.figure(figsize=(4,3))
plt.plot(f, np.unwrap(np.angle(rawdata))*180/np.pi, '-b', label='Data')
plt.plot(f, np.unwrap(np.angle(Hfit))*180/np.pi, '-r', label='Fitted data')
plt.ylabel('Phase [in deg]')
plt.xlabel('Frequency [in Hz]')
plt.xscale('log')
plt.grid(which='both')
plt.ylim(80,200)
plt.xlim([0.3,f[-1]])
plt.legend(loc='best', frameon=True, fancybox=True)
sns.despine()
plt.tight_layout()
plt.savefig('ctl_id_phase.pdf')
plt.show()
|
py | b405f0169d6ed6d905b72162f3ad3127b0a95b5d | import contextlib
import sys
import os
import unittest
from test import support
import time
resource = support.import_module('resource')
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
def test_args(self):
self.assertRaises(TypeError, resource.getrlimit)
self.assertRaises(TypeError, resource.getrlimit, 42, 42)
self.assertRaises(TypeError, resource.setrlimit)
self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)
def test_fsize_ismax(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
# number on a platform with large file support. On these platforms,
# we need to test that the get/setrlimit functions properly convert
# the number to a C long long and that the conversion doesn't raise
# an error.
self.assertEqual(resource.RLIM_INFINITY, max)
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
def test_fsize_enforced(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# Check to see what happens when the RLIMIT_FSIZE is small. Some
# versions of Python were terminated by an uncaught SIGXFSZ, but
# pythonrun.c has been fixed to ignore that exception. If so, the
# write() should return EFBIG when the limit is exceeded.
# At least one platform has an unlimited RLIMIT_FSIZE and attempts
# to change it raise ValueError instead.
try:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
limit_set = True
except ValueError:
limit_set = False
f = open(support.TESTFN, "wb")
try:
f.write(b"X" * 1024)
try:
f.write(b"Y")
f.flush()
# On some systems (e.g., Ubuntu on hppa) the flush()
# doesn't always cause the exception, but the close()
# does eventually. Try flushing several times in
# an attempt to ensure the file is really synced and
# the exception raised.
for i in range(5):
time.sleep(.1)
f.flush()
except OSError:
if not limit_set:
raise
if limit_set:
# Close will attempt to flush the byte we wrote
# Restore limit first to avoid getting a spurious error
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
finally:
f.close()
finally:
if limit_set:
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
support.unlink(support.TESTFN)
def test_fsize_toobig(self):
# Be sure that setrlimit is checking for really large values
too_big = 10**50
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
except (OverflowError, ValueError):
pass
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
except (OverflowError, ValueError):
pass
def test_getrusage(self):
self.assertRaises(TypeError, resource.getrusage)
self.assertRaises(TypeError, resource.getrusage, 42, 42)
usageself = resource.getrusage(resource.RUSAGE_SELF)
usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
# May not be available on all systems.
try:
usageboth = resource.getrusage(resource.RUSAGE_BOTH)
except (ValueError, AttributeError):
pass
try:
usage_thread = resource.getrusage(resource.RUSAGE_THREAD)
except (ValueError, AttributeError):
pass
# Issue 6083: Reference counting bug
def test_setrusage_refcount(self):
try:
limits = resource.getrlimit(resource.RLIMIT_CPU)
except AttributeError:
pass
else:
class BadSequence:
def __len__(self):
return 2
def __getitem__(self, key):
if key in (0, 1):
return len(tuple(range(1000000)))
raise IndexError
resource.setrlimit(resource.RLIMIT_CPU, BadSequence())
def test_pagesize(self):
pagesize = resource.getpagesize()
self.assertIsInstance(pagesize, int)
self.assertGreaterEqual(pagesize, 0)
@unittest.skipUnless(sys.platform == 'linux', 'test requires Linux')
def test_linux_constants(self):
for attr in ['MSGQUEUE', 'NICE', 'RTPRIO', 'RTTIME', 'SIGPENDING']:
with contextlib.suppress(AttributeError):
self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
@support.requires_freebsd_version(9)
def test_freebsd_constants(self):
for attr in ['SWAP', 'SBSIZE', 'NPTS']:
with contextlib.suppress(AttributeError):
self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
@unittest.skipUnless(hasattr(resource, 'prlimit'), 'no prlimit')
@support.requires_linux_version(2, 6, 36)
def test_prlimit(self):
self.assertRaises(TypeError, resource.prlimit)
self.assertRaises(ProcessLookupError, resource.prlimit,
-1, resource.RLIMIT_AS)
limit = resource.getrlimit(resource.RLIMIT_AS)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS), limit)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS, limit),
limit)
# Issue 20191: Reference counting bug
@unittest.skipUnless(hasattr(resource, 'prlimit'), 'no prlimit')
@support.requires_linux_version(2, 6, 36)
def test_prlimit_refcount(self):
class BadSeq:
def __len__(self):
return 2
def __getitem__(self, key):
return limits[key] - 1 # new reference
limits = resource.getrlimit(resource.RLIMIT_AS)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS, BadSeq()),
limits)
def test_main(verbose=None):
support.run_unittest(ResourceTest)
if __name__ == "__main__":
test_main()
|
py | b405f180c94994c1adac56a5d302fc7f7769a343 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Cache files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
"""Tests for the Chrome Cache files parser."""
def testParse(self):
"""Tests the Parse function."""
parser = chrome_cache.ChromeCacheParser()
storage_writer = self._ParseFile(['chrome_cache', 'index'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 217)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'chrome:cache:entry',
'date_time': '2014-04-30 16:44:36.226091',
'original_url': (
'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
py | b405f2705b11ef9b74437837c9fb82cfd88fbceb | import fluidity_tools
def pznd(state, parameters):
'''Calculate sources and sinks for a simple PZND model'''
if not check_pznd_parameters(parameters):
raise TypeError("Missing Parameter")
P=state.scalar_fields["Phytoplankton"]
Z=state.scalar_fields["Zooplankton"]
N=state.scalar_fields["Nutrient"]
D=state.scalar_fields["Detritus"]
I=state.scalar_fields["_PAR"] # Note: *NOT* the PhotosyntheticRadiation field, but the internal _PAR field, which
# has been projected onto the same mesh as phytoplankton and has been converted from
# incident radiation to just the active part.
Pnew=state.scalar_fields["IteratedPhytoplankton"]
Znew=state.scalar_fields["IteratedZooplankton"]
Nnew=state.scalar_fields["IteratedNutrient"]
Dnew=state.scalar_fields["IteratedDetritus"]
coords=state.vector_fields["Coordinate"]
P_source=state.scalar_fields["PhytoplanktonSource"]
Z_source=state.scalar_fields["ZooplanktonSource"]
N_source=state.scalar_fields["NutrientSource"]
D_source=state.scalar_fields["DetritusSource"]
N_abs=state.scalar_fields["NutrientAbsorption"]
try:
PP=state.scalar_fields["PrimaryProduction"]
except KeyError:
PP=None
try:
PG=state.scalar_fields["PhytoplanktonGrazing"]
except KeyError:
PG=None
alpha=parameters["alpha"]
beta=parameters["beta"]
gamma=parameters["gamma"]
g=parameters["g"]
k_N=parameters["k_N"]
k=parameters["k"]
v=parameters["v"]
mu_P=parameters["mu_P"]
mu_Z=parameters["mu_Z"]
mu_D=parameters["mu_D"]
p_P=parameters["p_P"]
p_D=1-p_P
for n in range(P.node_count):
# Values of fields on this node.
P_n=max(.5*(P.node_val(n)+Pnew.node_val(n)), 0.0)
Z_n=max(.5*(Z.node_val(n)+Znew.node_val(n)), 0.0)
N_n=max(.5*(N.node_val(n)+Nnew.node_val(n)), 0.0)
D_n=max(.5*(D.node_val(n)+Dnew.node_val(n)), 0.0)
I_n=max(I.node_val(n), 0.0)
if (I_n < 0.0001):
I_n = 0.0
# Light limited phytoplankton growth rate.
J=(v*alpha*I_n)/(v**2+alpha**2*I_n**2)**0.5
# Nitrate limiting factor.
Q=N_n/(k_N+N_n)
# Total phytoplankton growth rate.
R_P=J*P_n*Q
# Zooplankton grazing of phytoplankton.
G_P=(g * p_P * P_n**2 * Z_n)/(k**2 + p_P*P_n**2 + p_D*D_n**2)
# Zooplankton grazing of detritus.
G_D=(g * (1-p_P) * D_n**2 * Z_n)/(k**2 + p_P*P_n**2 + p_D*D_n**2)
# Death rate of phytoplankton.
De_P=mu_P*P_n*P_n/(P_n+0.2)
# Death rate of zooplankton.
De_Z=mu_Z*Z_n*Z_n*Z_n/(Z_n+3)
# Detritus remineralisation.
De_D=mu_D*D_n
P_source.set(n, R_P - G_P - De_P)
if PP:
PP.set(n, R_P)
if PG:
PG.set(n, G_P)
Z_source.set(n, gamma*beta*(G_P+G_D) - De_Z)
N_source.set(n, -R_P + De_D + (1-gamma)*beta*(G_P+G_D))
D_source.set(n, -De_D + De_P + De_Z +(1-beta)*G_P - beta*G_D)
def check_pznd_parameters(parameters):
from sys import stderr
valid=True
if not parameters.has_key("alpha"):
stderr.write("PZND parameter alpha missing.\n")
stderr.write("alpha is this initial slope of the P-I curve.\n\n")
valid = False
if not parameters.has_key("beta"):
stderr.write("PZND parameter beta missing.\n")
stderr.write("beta is the assimilation efficiency of zooplankton.\n\n")
valid = False
if not parameters.has_key("gamma"):
stderr.write("PZND parameter gamma missing.\n")
stderr.write("gamma is the zooplankton excretion parameter.\n\n")
valid = False
if not parameters.has_key("g"):
stderr.write("PZND parameter g missing.\n")
stderr.write("g is the zooplankton maximum growth rate.\n\n")
valid = False
if not parameters.has_key("k_N"):
stderr.write("PZND parameter k_N missing.\n")
stderr.write("k_N is the half-saturation constant for nutrient.\n\n")
valid = False
if not parameters.has_key("k"):
stderr.write("PZND parameter k missing.\n")
stderr.write("k is the zooplankton grazing parameter.\n\n")
valid = False
if not parameters.has_key("mu_P"):
stderr.write("PZND parameter mu_P missing.\n")
stderr.write("mu_P is the phytoplankton mortality rate.\n\n")
valid = False
if not parameters.has_key("mu_Z"):
stderr.write("PZND parameter mu_Z missing.\n")
stderr.write("mu_Z is the zooplankton mortality rate.\n\n")
valid = False
if not parameters.has_key("mu_D"):
stderr.write("PZND parameter mu_D missing.\n")
stderr.write("mu_D is the detritus remineralisation rate.\n\n")
valid = False
if not parameters.has_key("p_P"):
stderr.write("PZND parameter p_P missing.\n")
stderr.write("p_P is the relative grazing preference of zooplankton for phytoplankton.\n\n")
valid = False
if not parameters.has_key("v"):
stderr.write("PZND parameter v missing.\n")
stderr.write("v is the maximum phytoplankton growth rate.\n\n")
valid = False
return valid
def lotka_volterra(state,parameters):
if not check_lotka_volterra_parameters(parameters):
raise TypeError("Missing Parameter")
P=state.scalar_fields["Phytoplankton"]
Z=state.scalar_fields["Zooplankton"]
Pnew=state.scalar_fields["IteratedPhytoplankton"]
Znew=state.scalar_fields["IteratedZooplankton"]
P_source=state.scalar_fields["PhytoplanktonSource"]
Z_source=state.scalar_fields["ZooplanktonSource"]
alpha=parameters["alpha"]
beta=parameters["beta"]
gamma=parameters["gamma"]
delta=parameters["delta"]
for n in range(P.node_count):
# Values of fields on this node.
P_n=.5*(P.node_val(n)+Pnew.node_val(n))
Z_n=.5*(Z.node_val(n)+Znew.node_val(n))
P_source.set(n, P_n*(alpha-beta*Z_n))
Z_source.set(n, -Z_n*(gamma-delta*P_n))
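# An illustrative parameter dictionary (values are hypothetical) matching the
# checks in check_lotka_volterra_parameters below and the model
# dP/dt = P*(alpha - beta*Z), dZ/dt = -Z*(gamma - delta*P):
#
# example_parameters = {"alpha": 1.0, "beta": 0.1, "gamma": 1.5, "delta": 0.075}
# lotka_volterra(state, example_parameters)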
def check_lotka_volterra_parameters(parameters):
from sys import stderr
valid=True
if not parameters.has_key("alpha"):
stderr.write("Lotka Voltera parameter alpha missing.\n")
valid = False
if not parameters.has_key("beta"):
stderr.write("Lotka Voltera parameter beta missing.\n")
valid = False
if not parameters.has_key("gamma"):
stderr.write("Lotka Voltera parameter gamma missing.\n")
valid = False
if not parameters.has_key("delta"):
stderr.write("Lotka Voltera parameter delta missing.\n")
valid = False
if not valid:
stderr.write(" dP/dt = P*(alpha-beta * Z)")
stderr.write(" dZ/dt = - Z*(gamma-delta * P)")
return valid
#############################################################
# #
# pczdna model #
# #
#############################################################
def six_component(state, parameters):
'''Calculate sources and sinks for pczdna biology model'''
# Based on the equations in
# Popova, E. E.; Coward, A. C.; Nurser, G. A.; de Cuevas, B.; Fasham, M. J. R. & Anderson, T. R.
# Mechanisms controlling primary and new production in a global ecosystem model - Part I:
# Validation of the biological simulation Ocean Science, 2006, 2, 249-266.
# DOI: 10.5194/os-2-249-2006
import math
if not check_six_component_parameters(parameters):
raise TypeError("Missing Parameter")
P=state.scalar_fields["Phytoplankton"]
C=state.scalar_fields["Chlorophyll"]
Z=state.scalar_fields["Zooplankton"]
N=state.scalar_fields["Nutrient"]
A=state.scalar_fields["Ammonium"]
D=state.scalar_fields["Detritus"]
I=state.scalar_fields["_PAR"] # Note: *NOT* the PhotosyntheticRadiation field, but the internal _PAR field, which
# has been projected onto the same mesh as phytoplankton and has been converted from
# incident radiation to just the active part.
Pnew=state.scalar_fields["IteratedPhytoplankton"]
Cnew=state.scalar_fields["IteratedChlorophyll"]
Znew=state.scalar_fields["IteratedZooplankton"]
Nnew=state.scalar_fields["IteratedNutrient"]
Anew=state.scalar_fields["IteratedAmmonium"]
Dnew=state.scalar_fields["IteratedDetritus"]
coords=state.vector_fields["Coordinate"]
P_source=state.scalar_fields["PhytoplanktonSource"]
C_source=state.scalar_fields["ChlorophyllSource"]
Z_source=state.scalar_fields["ZooplanktonSource"]
N_source=state.scalar_fields["NutrientSource"]
N_abs=state.scalar_fields["NutrientAbsorption"]
A_source=state.scalar_fields["AmmoniumSource"]
D_source=state.scalar_fields["DetritusSource"]
try:
PP=state.scalar_fields["PrimaryProduction"]
except KeyError:
PP=None
try:
PG=state.scalar_fields["PhytoplanktonGrazing"]
except KeyError:
PG=None
alpha_c=parameters["alpha_c"]
beta_P=parameters["beta_p"]
beta_D=parameters["beta_d"]
delta=parameters["delta"]
gamma=parameters["gamma"]
zeta=parameters["zeta"]
epsilon=parameters["epsilon"]
psi=parameters["psi"]
g=parameters["g"]
k_N=parameters["k_N"]
k_A=parameters["k_A"]
k_p=parameters["k_p"]
k_z=parameters["k_z"]
v=parameters["v"]
mu_P=parameters["mu_P"]
mu_Z=parameters["mu_Z"]
mu_D=parameters["mu_D"]
p_P=parameters["p_P"]
theta_m=parameters["theta_m"]
lambda_bio=parameters["lambda_bio"]
lambda_A=parameters["lambda_A"]
photicZoneLimit=parameters["photic_zone_limit"]
p_D=1-p_P
for n in range(P.node_count):
# Values of fields on this node.
P_n=max(.5*(P.node_val(n)+Pnew.node_val(n)), 0.0)
Z_n=max(.5*(Z.node_val(n)+Znew.node_val(n)), 0.0)
N_n=max(.5*(N.node_val(n)+Nnew.node_val(n)), 0.0)
A_n=max(.5*(A.node_val(n)+Anew.node_val(n)), 0.0)
C_n=max(.5*(C.node_val(n)+Cnew.node_val(n)), 0.0)
D_n=max(.5*(D.node_val(n)+Dnew.node_val(n)), 0.0)
I_n=max(I.node_val(n), 0.0)
depth=abs(coords.node_val(n)[2])
if (I_n < 0.0001):
I_n =0
# In the continuous model we start calculating Chl-a related
# properties at light levels close to zero with a potential /0.
# It seems that assuming theta = zeta at very low P and Chl takes
# care of this most effectively
if (P_n < 1e-7 or C_n < 1e-7):
theta = zeta
else:
theta = C_n/P_n*zeta # C=P_n*zeta
alpha = alpha_c * theta
# Light limited phytoplankton growth rate.
J=(v*alpha*I_n)/(v**2+alpha**2*I_n**2)**0.5
# Nitrate limiting factor.
Q_N=(N_n*math.exp(-psi * A_n))/(k_N+N_n)
# Ammonium limiting factor
Q_A=A_n/(k_A+A_n)
# Chl growth scaling factor
# R_P=(theta_m/theta)*J*(Q_N+Q_A)/(alpha*I_n+1e-7)
R_P=(theta_m/theta)*(Q_N+Q_A)*v/(v**2+alpha**2*I_n**2)**0.5
# Primary production
X_P=J*(Q_N+Q_A)*P_n
# Zooplankton grazing of phytoplankton.
# It looks a bit different from the original version, however
# it is the same function with differently normalised parameters to
# simplify tuning
# G_P=(g * epsilon * p_P * P_n**2 * Z_n)/(g+epsilon*(p_P*P_n**2 + p_D*D_n**2))
G_P=(g * p_P * P_n**2 * Z_n)/(epsilon + (p_P*P_n**2 + p_D*D_n**2))
# Zooplankton grazing of detritus. (p_D - 1-p_P)
# G_D=(g * epsilon * (1-p_P) * D_n**2 * Z_n)/(g+epsilon*(p_P*P_n**2 + p_D*D_n**2))
G_D=(g * (1-p_P) * D_n**2 * Z_n)/(epsilon + (p_P*P_n**2 + p_D*D_n**2))
# Death rate of phytoplankton.
# There is an additional linear term because we have a unified model
# (no below/above photic zone distinction)
De_P=mu_P*P_n*P_n/(P_n+k_p)+lambda_bio*P_n
# Death rate of zooplankton.
# There is an additional linear term because we have a unified model
# (no below/above photic zone distinction)
De_Z=mu_Z*Z_n**3/(Z_n+k_z)+lambda_bio*Z_n
# Detritus remineralisation.
De_D=mu_D*D_n+lambda_bio*P_n+lambda_bio*Z_n
# Ammonium nitrification (only below the photic zone)
# This is the only above/below term
De_A=lambda_A*A_n*(1-photic_zone(depth,100,20))
P_source.set(n, J*(Q_N+Q_A)*P_n - G_P - De_P)
C_source.set(n, (R_P*J*(Q_N+Q_A)*P_n + (-G_P-De_P))*theta/zeta)
Z_source.set(n, delta*(beta_P*G_P+beta_D*G_D) - De_Z)
D_source.set(n, -De_D + De_P + gamma*De_Z +(1-beta_P)*G_P - beta_D*G_D)
N_source.set(n, -J*P_n*Q_N+De_A)
A_source.set(n, -J*P_n*Q_A + De_D + (1 - delta)*(beta_P*G_P + beta_D*G_D) + (1-gamma)*De_Z-De_A)
if PP:
PP.set(n, X_P)
if PG:
PG.set(n, G_P)
def check_six_component_parameters(parameters):
from sys import stderr
valid=True
if not parameters.has_key("alpha_c"):
stderr.write("PCZNDA parameter alpha_c missing.\n")
stderr.write("alpha is the chlorophyll-specific inital slope of P-I curve.\n\n")
valid = False
if not parameters.has_key("beta_p"):
stderr.write("PCZNDA parameter beta_p missing.\n")
stderr.write("beta is the assimilation efficiency of zooplankton for plankton.\n\n")
valid = False
if not parameters.has_key("beta_d"):
stderr.write("PCZNDA parameter beta_d missing.\n")
stderr.write("beta is the assimilation efficiency of zooplankton for detritus.\n\n")
valid = False
if not parameters.has_key("delta"):
stderr.write("PCZNDA parameter delta missing.\n")
stderr.write("delta is the zooplankton excretion parameter.\n\n")
valid = False
if not parameters.has_key("gamma"):
stderr.write("PCZNDA parameter gamma missing.\n")
stderr.write("gamma is the zooplankton excretion parameter.\n\n")
valid = False
if not parameters.has_key("epsilon"):
stderr.write("PCZNDA parameter epsilon missing.\n")
stderr.write("epsilon is the grazing parameter relating the rate of prey item to prey density.\n\n")
valid = False
if not parameters.has_key("g"):
stderr.write("PCZNDA parameter g missing.\n")
stderr.write("g is the zooplankton maximum growth rate.\n\n")
valid = False
if not parameters.has_key("k_A"):
stderr.write("PCZNDA parameter k_A missing.\n")
stderr.write("k_A is the half-saturation constant for ammonium.\n\n")
valid = False
if not parameters.has_key("k_p"):
stderr.write("PCZNDA parameter k_p missing.\n")
stderr.write("k_ is something to do with mortatility rate of phytoplankton")
if not parameters.has_key("k_z"):
stderr.write("PCZNDA parameter k_z missing.\n")
stderr.write("k_z is something to do with te mortality rate of zooplankton\n\n")
valid = False
if not parameters.has_key("k_N"):
stderr.write("PCZNDA parameter k_N missing.\n")
stderr.write("k_N is the half-saturation constant for nutrient.\n\n")
valid = False
if not parameters.has_key("mu_P"):
stderr.write("PCZNDA parameter mu_P missing.\n")
stderr.write("mu_P is the phytoplankton mortality rate.\n\n")
valid = False
if not parameters.has_key("mu_Z"):
stderr.write("PCZNDA parameter mu_Z missing.\n")
stderr.write("mu_Z is the zooplankton mortality rate.\n\n")
valid = False
if not parameters.has_key("mu_D"):
stderr.write("PCZNDA parameter mu_D missing.\n")
stderr.write("mu_D is the detritus remineralisation rate.\n\n")
valid = False
if not parameters.has_key("psi"):
stderr.write("PCZNDA parameter psi missing.\n")
stderr.write("psi is the strength of ammonium inibition of nitrate uptake\n\n")
valid = False
if not parameters.has_key("p_P"):
stderr.write("PCZNDA parameter p_P missing.\n")
stderr.write("p_P is the relative grazing preference of zooplankton for phytoplankton.\n\n")
valid = False
if not parameters.has_key("v"):
stderr.write("PCZNDA parameter v missing.\n")
stderr.write("v is the maximum phytoplankton growth rate.\n\n")
valid = False
if not parameters.has_key("theta_m"):
stderr.write("PCZNDA parameter theta_m missing.\n")
stderr.write("theta_m is the maximum Chlorophyll to C ratio.\n\n")
valid = False
if not parameters.has_key("zeta"):
stderr.write("PCZNDA parameter zeta missing.\n")
stderr.write("zeta is the conversion factor from gC to mmolN on C:N ratio of 6.5\n\n")
valid = False
if not parameters.has_key("lambda_bio"):
stderr.write("PCZNDA parameter lambda_bio missing.\n")
stderr.write("lambda_bio is rate which plankton turn to detritus below photic zone\n\n")
valid = False
if not parameters.has_key("lambda_A"):
stderr.write("PCZNDA parameter lambda_A missing.\n")
stderr.write("lambda_A nitrification rate below photic zone\n\n")
valid = False
if not parameters.has_key("photic_zone_limit"):
stderr.write("PCZNDA parameter photic_zone_limit missing.\n")
stderr.write("photic_zone_limit defines the base of the photic zone in W/m2\n\n")
valid = False
return valid
def photic_zone(z,limit,transition_length):
depth = abs(z)
if (depth < limit):
return 1.
elif (depth < limit+transition_length):
return 1.-(depth-limit)/float(transition_length)
else:
return 0.0
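# Illustrative values: with limit=100 and transition_length=20,
# photic_zone(50, 100, 20) -> 1.0 (inside the photic zone),
# photic_zone(110, 100, 20) -> 0.5 (halfway through the transition),
# photic_zone(130, 100, 20) -> 0.0 (below the transition).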
|
py | b405f2c64f4195c6f5e812cbf1dbf36d2d7335d9 | #!/usr/bin/python
# encoding=utf-8
'''
1. install sas3ircu
2. run as root
'''
import re
import commands
import json
ARRAYINFO = [
'DG/VD', # 'id',
'State', # Status of volume
'TYPE', # RAID level 'raid_level',
'Size',
'disks',
]
DISKINFO = [
'DID', # device id
'EID:SID', # EnclosureID:SlotID
'Size', # size
'Intf', # Protocol
'Model', # Model Number
'SN', # Serial No
'State',
'Med', # Drive Type
'Firmware Revision', # Firmware Revision
]
SAS_CMD = "/usr/local/bin/sas3ircu"
def get_controllers():
cmd = SAS_CMD + " LIST"
return_code, out = commands.getstatusoutput(cmd)
if return_code != 0:
return []
controllers = []
for line in out.split('\n'):
if re.match(r'^\s+[0-9]+\s+\w+.*', line):
ctl_index, ctl_type = line.split()[0], line.split()[1]
controllers.append((ctl_index, ctl_type))
return controllers
def get_arrays():
res = []
_ctl_list = get_controllers()
for ctl in _ctl_list:
res.extend(
[dict(zip(ARRAYINFO, a)) for a in _get_array(ctl[0])]
)
return res
def _get_array(ctrlnmbr):
cmd = SAS_CMD+' '+ctrlnmbr+' DISPLAY'
return_code, res = commands.getstatusoutput(cmd)
if return_code != 0:
return []
array_list = []
arrayid = None
state = ''
_type = ''
size = ''
disklist = []
skipped = False
for line in res.split('\n'):
if re.match('^IR Volume information.*$', line):
skipped = True
if re.match('^IR volume [0-9]+.*$', line):
skipped = False
if arrayid is not None:
array_list.append((arrayid, state, _type, size, disklist))
disklist = []
arrayid = re.match('^IR volume ([0-9]+).*$', line).group(1)
if not skipped:
if re.match(r'^\s*Status of volume.*$', line):
state = line.split(':')[1].strip()
if re.match(r'^\s*RAID level.*$', line):
_type = line.split(':')[1].strip()
if re.match(r'^\s*Size \(in MB\)\s+.*$', line):
size = line.split(':')[1].strip()
size = str(int(round((float(size) / 1000))))+'G'
if re.match(r'^\s*PHY\[[0-9]+\] Enclosure#/Slot#.*$', line):
disksid = ':'.join(line.split(':')[1:]).strip()
disklist.append(disksid)
if arrayid is not None:
array_list.append((arrayid, state, _type, size, disklist))
# ie: ('0', 'Okay (OKY)', 'RAID1', '1800G', ['1:0', '1:1'])
return array_list
def get_disks():
res = []
_ctl_list = get_controllers()
for ctl in _ctl_list:
for d in _get_disks(ctl[0]):
_disk = dict(zip(DISKINFO, d))
res.append(_disk)
return res
def _get_disks(ctrlnmbr):
cmd = SAS_CMD+' '+ctrlnmbr+' DISPLAY'
return_code, res = commands.getstatusoutput(cmd)
if return_code != 0:
return []
disk_list = []
diskid = -1
enclid = ''
slotid = ''
realid = ['', '']
disksize = ''
interface = ''
diskmodel = ''
diskserial = ''
state = ''
drivetype = ''
firmwarerevision = ''
skipped = False
for line in res.split('\n'):
if re.match('^Device is a Enclosure services device.*$', line):
skipped = True
if re.match('^Device is a Hard disk.*$', line):
skipped = False
if diskid == -1:
diskid = diskid+1
else:
disk_list.append((str(diskid), realid, disksize, interface,
diskmodel, diskserial, state, drivetype,
firmwarerevision))
diskid = diskid+1
if not skipped:
if re.match(r'^\s*Enclosure #.*$', line):
enclid = line.split(':')[1].strip()
if re.match(r'^\s*Slot #.*$', line):
slotid = line.split(':')[1].strip()
realid = ':'.join([enclid, slotid])
if re.match(r'^\s*State.*$', line):
state = line.split(':')[1].strip()
if re.match(r'^\s*Size.*$', line):
disksize = line.split(':')[1].split('/')[0].strip()
disksize = str(int(round((float(disksize) / 1000))))+'G'
if re.match(r'^\s*Model Number.*$', line):
diskmodel = line.split(':')[1].strip()
if re.match(r'^\s*Firmware Revision.*$', line):
firmwarerevision = line.split(':')[1].strip()
if re.match(r'^\s*Serial No.*$', line):
diskserial = line.split(':')[1].strip()
if re.match(r'^\s*Protocol.*$', line):
interface = line.split(':')[1].strip()
if re.match(r'^\s*Drive Type.*$', line):
drivetype = line.split(':')[1].strip()
"""
ie:
{
"DID": "8", "EID:SID": "2:6", "Firmware Revision": "TAF0",
"Intf": "SATA", "Med": "SATA_HDD", "Model": "HGST HUS726060AL",
"SN": "NCHRVW4S", "Size": "5723G", "State": "Ready (RDY)"
},
"""
if diskid != -1:
disk_list.append((str(diskid), realid, disksize, interface, diskmodel,
diskserial, state, drivetype, firmwarerevision))
return disk_list
def get_sas_storage_info():
import collections
res = collections.defaultdict(dict)
for _r in get_controllers():
_idx = _r[0]
res[_idx]['type'] = _r[1]
res[_idx]['arrays'] = {}
res[_idx]['disks'] = {}
for _array in _get_array(_idx):
_array_id = _array[0]
res[_idx]['arrays'][_array_id] = {}
res[_idx]['arrays'][_array_id].update(zip(ARRAYINFO, _array))
for _disk in _get_disks(_idx):
_disk_id = _disk[0]
res[_idx]['disks'][_disk_id] = {}
res[_idx]['disks'][_disk_id].update(zip(DISKINFO, _disk))
return dict(res)
def get_vdpd_storcli_format():
_return = []
vd_list = get_arrays()
pd_list = get_disks()
_return.append(
{
'PD LIST': pd_list,
'VD LIST': vd_list,
'Physical Drives': len(pd_list),
'Virtual Drives': len(vd_list),
}
)
return _return
if __name__ == '__main__':
try:
print json.dumps(
get_vdpd_storcli_format(),
default=repr,
indent=4,
sort_keys=True)
except Exception as ex:
print ex
import sys
sys.exit(1)
|
py | b405f3cf97a2e078f55acd6449b1ac31a9a54d85 | class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def reverse(self, head, k):
current = head
next = None
prev = None
count = 0
while current is not None and count < k:
next = current.next
current.next = prev
prev = current
current = next
count += 1
if next is not None:
head.next = self.reverse(next, k)
return prev
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printList(self):
temp = self.head
while temp:
print(temp.data)
temp = temp.next
if __name__ == "__main__":
"""
from timeit import timeit
llist = LinkedList()
llist.push(9)
llist.push(8)
llist.push(7)
llist.push(6)
llist.push(5)
llist.push(4)
llist.push(3)
llist.push(2)
llist.push(1)
print(timeit(lambda: llist.printList(), number=10000)) # 0.35493226000107825
llist.head = llist.reverse(llist.head, 3)
print(timeit(lambda: llist.printList(), number=10000)) # 0.412117505000424
"""
|
py | b405f58962fb3c791d1f8b5ee3f823506169ea5c | #!/usr/bin/env python3
import time
import unittest
import numpy as np
import cereal.messaging as messaging
from selfdrive.test.helpers import with_processes
from selfdrive.camerad.snapshot.snapshot import get_snapshots
# only tests for EON and TICI
from selfdrive.hardware import EON, TICI
TEST_TIMESPAN = 30 # random.randint(60, 180) # seconds
SKIP_FRAME_TOLERANCE = 0
LAG_FRAME_TOLERANCE = 2 # ms
FPS_BASELINE = 20
CAMERAS = {
"frame": FPS_BASELINE,
"frontFrame": FPS_BASELINE // 2,
}
if TICI:
CAMERAS["frontFrame"] = FPS_BASELINE
CAMERAS["wideFrame"] = FPS_BASELINE
class TestCamerad(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not (EON or TICI):
raise unittest.SkipTest
# assert "SEND_REAR" in os.environ
# assert "SEND_FRONT" in os.environ
def _numpy_bgr2gray(self, im):
ret = np.clip(im[:,:,0] * 0.114 + im[:,:,1] * 0.587 + im[:,:,2] * 0.299, 0, 255).astype(np.uint8)
return ret
def _numpy_lap(self, im):
ret = np.zeros(im.shape)
ret += -4 * im
ret += np.concatenate([np.zeros((im.shape[0],1)),im[:,:-1]], axis=1)
ret += np.concatenate([im[:,1:],np.zeros((im.shape[0],1))], axis=1)
ret += np.concatenate([np.zeros((1,im.shape[1])),im[:-1,:]], axis=0)
ret += np.concatenate([im[1:,:],np.zeros((1,im.shape[1]))], axis=0)
ret = np.clip(ret, 0, 255).astype(np.uint8)
return ret
def _is_really_sharp(self, i, threshold=800, roi_max=np.array([8,6]), roi_xxyy=np.array([1,6,2,3])):
i = self._numpy_bgr2gray(i)
x_pitch = i.shape[1] // roi_max[0]
y_pitch = i.shape[0] // roi_max[1]
lap = self._numpy_lap(i)
lap_map = np.zeros((roi_max[1], roi_max[0]))
for r in range(lap_map.shape[0]):
for c in range(lap_map.shape[1]):
selected_lap = lap[r*y_pitch:(r+1)*y_pitch, c*x_pitch:(c+1)*x_pitch]
lap_map[r][c] = 5*selected_lap.var() + selected_lap.max()
print(lap_map[roi_xxyy[2]:roi_xxyy[3]+1,roi_xxyy[0]:roi_xxyy[1]+1])
if (lap_map[roi_xxyy[2]:roi_xxyy[3]+1,roi_xxyy[0]:roi_xxyy[1]+1] > threshold).sum() > \
(roi_xxyy[1]+1-roi_xxyy[0]) * (roi_xxyy[3]+1-roi_xxyy[2]) * 0.9:
return True
else:
return False
def _is_exposure_okay(self, i, med_ex=np.array([0.2,0.4]), mean_ex=np.array([0.2,0.6])):
i = self._numpy_bgr2gray(i)
i_median = np.median(i) / 256
i_mean = np.mean(i) / 256
print([i_median, i_mean])
return med_ex[0] < i_median < med_ex[1] and mean_ex[0] < i_mean < mean_ex[1]
@unittest.skip # skip for now
@with_processes(['camerad'])
def test_camera_operation(self):
print("checking image outputs")
if EON:
# run checks similar to prov
time.sleep(15) # wait for startup and AF
pic, fpic = get_snapshots()
self.assertTrue(self._is_really_sharp(pic))
self.assertTrue(self._is_exposure_okay(pic))
self.assertTrue(self._is_exposure_okay(fpic))
time.sleep(30)
# check again for consistency
pic, fpic = get_snapshots()
self.assertTrue(self._is_really_sharp(pic))
self.assertTrue(self._is_exposure_okay(pic))
self.assertTrue(self._is_exposure_okay(fpic))
elif TICI:
raise unittest.SkipTest # TBD
else:
raise unittest.SkipTest
@with_processes(['camerad'])
def test_frame_packets(self):
print("checking frame pkts continuity")
print(TEST_TIMESPAN)
sm = messaging.SubMaster([socket_name for socket_name in CAMERAS])
last_frame_id = dict.fromkeys(CAMERAS, None)
last_ts = dict.fromkeys(CAMERAS, None)
start_time_sec = time.time()
while time.time()- start_time_sec < TEST_TIMESPAN:
sm.update()
for camera in CAMERAS:
if sm.updated[camera]:
ct = (sm[camera].timestampEof if not TICI else sm[camera].timestampSof) / 1e6
if last_frame_id[camera] is None:
last_frame_id[camera] = sm[camera].frameId
last_ts[camera] = ct
continue
dfid = sm[camera].frameId - last_frame_id[camera]
self.assertTrue(abs(dfid - 1) <= SKIP_FRAME_TOLERANCE, "%s frame id diff is %d" % (camera, dfid))
dts = ct - last_ts[camera]
self.assertTrue(abs(dts - (1000/CAMERAS[camera])) < LAG_FRAME_TOLERANCE, "%s frame t(ms) diff is %f" % (camera, dts))
last_frame_id[camera] = sm[camera].frameId
last_ts[camera] = ct
time.sleep(0.01)
if __name__ == "__main__":
unittest.main()
|
py | b405f62acbe689922567f2762085def8986b8cdd | import time
import json
import os
import unittest
from web3 import Web3
from uniswap.uniswap import UniswapV2Client, UniswapV2Utils
class BaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.factory = Web3.toChecksumAddress("0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f")
cls.link_token = Web3.toChecksumAddress("0x20fe562d797a42dcb3399062ae9546cd06f63280")
cls.weth_token = Web3.toChecksumAddress("0xc778417E063141139Fce010982780140Aa0cD5Ab")
cls.link_weth_pair = Web3.toChecksumAddress("0x98A608D3f29EebB496815901fcFe8eCcC32bE54a")
with open(os.path.abspath(f"{os.path.dirname(os.path.abspath(__file__))}/config.json")) as f:
raw_json = json.load(f)
cls.address = Web3.toChecksumAddress(raw_json["account"]["address"])
cls.private_key = raw_json["account"]["private-key"]
cls.provider = raw_json["provider"]
cls.token_0 = raw_json["tokens"][0] # Token A for the ERC20-ERC20 pair
cls.token_1 = raw_json["tokens"][1] # Token B for the ERC20-ERC20 pair
cls.token_2 = raw_json["tokens"][2] # Token for the ERC20-WETH pair
uniswap = UniswapV2Client(cls.address, cls.private_key, provider=cls.provider)
# create a pair for token_0/token_1 if not already created
cls.token_pair = uniswap.get_pair(cls.token_0["address"], cls.token_1["address"])
if cls.token_pair == "0x0000000000000000000000000000000000000000":
print("Creating ERC20-ERC20 pair...")
token_tx = uniswap.add_liquidity(
token_a=cls.token_0["address"],
token_b=cls.token_1["address"],
amount_a=int(cls.token_0["supply"] * 10 ** -1), # 1/10 of the total supply of A
amount_b=int(cls.token_1["supply"] * 10 ** -1), # 1/10 of the total supply of B
min_a=int(cls.token_1["supply"] / cls.token_0["supply"] * 1.01), # allow 1% slippage on B/A
min_b=int(cls.token_0["supply"] / cls.token_1["supply"] * 1.01), # allow 1% slippage on B/A
to=cls.address,
deadline=int(time.time() + 10 ** 3))
uniswap.conn.eth.waitForTransactionReceipt(token_tx, timeout=2000)
cls.token_pair = uniswap.get_pair(cls.token_0["address"], cls.token_1["address"])
# create a pair for token_2/weth if not already created
cls.weth_pair = uniswap.get_pair(cls.token_2["address"], uniswap.get_weth_address())
if cls.weth_pair == "0x0000000000000000000000000000000000000000":
print("Creating ERC20-WETH pair...")
weth_tx = uniswap.add_liquidity_eth(
token=cls.token_2["address"],
amount_token=int(cls.token_2["supply"] * 10 ** -1), # 1/10 of the total supply of the token
amount_eth=100, # 100 wei
min_token=int(1000 / cls.token_2["supply"] * 1.01), # allow 1% slippage on B/A
min_eth=int(cls.token_2["supply"] / 1000 * 1.01), # allow 1% slippage on B/A
to=cls.address,
deadline=int(time.time() + 10 ** 3))
uniswap.conn.eth.waitForTransactionReceipt(weth_tx, timeout=2000)
cls.weth_pair = uniswap.get_pair(cls.token_2["address"], uniswap.get_weth_address())
class UniswapV2ClientTest(BaseTest):
def setUp(self):
self.uniswap = UniswapV2Client(self.address, self.private_key, self.provider)
def test_get_pair(self):
pair = self.uniswap.get_pair(self.token_0["address"], self.token_1["address"])
self.assertEqual(pair, self.token_pair)
def test_get_pair_swapped_order(self):
pair = self.uniswap.get_pair(self.token_1["address"], self.token_0["address"])
self.assertEqual(pair, self.token_pair)
def test_get_pair_not_found(self):
rand_token = Web3.toChecksumAddress("0xAE14A3B9F6B333BfF64bEAe1C70a93c0781D6A3F")
pair = self.uniswap.get_pair(self.token_0["address"], rand_token)
self.assertEqual(pair, "0x0000000000000000000000000000000000000000")
def test_get_num_pairs(self):
num_pairs = self.uniswap.get_num_pairs()
self.assertGreaterEqual(num_pairs, 50)
def test_get_pair_by_index(self):
pair = self.uniswap.get_pair_by_index(51)
self.assertEqual(pair, self.token_pair)
def test_get_pair_by_index_not_found(self):
pair = self.uniswap.get_pair_by_index(99999)
self.assertEqual(pair, "0x0000000000000000000000000000000000000000")
def test_get_fee(self):
fee = self.uniswap.get_fee()
self.assertEqual(fee, "0x0000000000000000000000000000000000000000")
def test_get_fee_setter(self):
fee_setter = self.uniswap.get_fee()
self.assertEqual(fee_setter, "0x0000000000000000000000000000000000000000")
def test_get_weth_address(self):
address = self.uniswap.get_weth_address()
self.assertEqual(address, self.weth_token)
def test_add_liquidity(self):
amount_a = int(self.token_0["supply"] * 10 ** -3) # 1/1000 of the total supply of A
amount_b = int(self.token_1["supply"] * 10 ** -3) # 1/1000 of the total supply of B
min_a = 0 # int((amount_b / amount_a) * 1.01) # allow 1% slippage on B/A
min_b = 0 # int((amount_a / amount_b) * 1.01) # allow 1% slippage on A/B
deadline = int(time.time()) + 1000
tx = self.uniswap.add_liquidity(
self.token_0["address"], self.token_1["address"], amount_a, amount_b, min_a, min_b, self.address, deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_add_liquidity_eth(self):
token = self.token_2["address"]
amount_token = int(self.token_2["supply"] * 10 ** -3) # 1/1000 of the total supply of the token
amount_eth = 1 # 1 wei
deadline = int(time.time()) + 1000
tx = self.uniswap.add_liquidity_eth(
token, amount_token, amount_eth, min_token=0, min_eth=0, to=self.address, deadline=deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
# FIXME add way to retrieve current liquidity balance for a pair
"""def test_remove_liquidity(self):
tx = self.uniswap.remove_liquidity(
token_a=self.token_0["address"],
token_b=self.token_1["address"],
liquidity=100,
min_a=0,
min_b=0,
to=self.address,
deadline=int(time.time()) + 1000
)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_remove_liquidity_eth(self):
token = Web3.toChecksumAddress("0x20fe562d797a42dcb3399062ae9546cd06f63280")
liquidity = 1 * 10 ** 15
min_token = 1 * 10 ** 15
min_eth = 2 * 10 ** 13
deadline = int(time.time()) + 1000
tx = self.uniswap.remove_liquidity_eth(
token=self.token_2["address"],
liquidity=1,
min_token=0,
min_eth=0,
to=self.address,
deadline=int(time.time()) + 1000
)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])"""
def test_remove_liquidity_with_permit(self):
pass # TODO
def test_remove_liquidity_eth_with_permit(self):
pass # TODO
def test_swap_exact_tokens_for_tokens(self):
amount = int(self.token_0["supply"] * 10 ** -3)
min_out = int(self.token_1["supply"] * 10 ** -5)
path = [self.token_0["address"], self.token_1["address"]]
deadline = int(time.time()) + 1000
tx = self.uniswap.swap_exact_tokens_for_tokens(amount, min_out, path, to=self.address, deadline=deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_swap_tokens_for_exact_tokens(self):
amount_out = int(self.token_1["supply"] * 10 ** -5)
amount_in_max = int(self.token_0["supply"] * 10 ** -3)
path = [self.token_0["address"], self.token_1["address"]]
deadline = int(time.time()) + 1000
tx = self.uniswap.swap_tokens_for_exact_tokens(amount_out, amount_in_max, path, self.address, deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_swap_exact_eth_for_tokens(self):
amount = 10 # 10 wei
min_out = int(self.token_2["supply"] * 10 ** -5)
path = [self.uniswap.get_weth_address(), self.token_2["address"]]
deadline = int(time.time()) + 1000
tx = self.uniswap.swap_exact_eth_for_tokens(amount, min_out, path, self.address, deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_swap_tokens_for_exact_eth(self):
amount_out = 1 # 1 wei
amount_in_max = int(self.token_2["supply"] * 10 ** -3)
path = [self.token_2["address"], self.uniswap.get_weth_address()]
deadline = int(time.time()) + 1000
tx = self.uniswap.swap_tokens_for_exact_eth(amount_out, amount_in_max, path, self.address, deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_swap_exact_tokens_for_eth(self):
amount = int(self.token_2["supply"] * 10 ** -3)
min_out = 1 # 1 wei
path = [self.token_2["address"], self.uniswap.get_weth_address()]
deadline = int(time.time()) + 1000
tx = self.uniswap.swap_exact_tokens_for_eth(amount, min_out, path, self.address, deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
def test_swap_eth_for_exact_tokens(self):
amount_out = int(self.token_2["supply"] * 10 ** -5)
amount = 100 # 100 wei
path = [self.uniswap.get_weth_address(), self.token_2["address"]]
deadline = int(time.time()) + 1000
tx = self.uniswap.swap_eth_for_exact_tokens(amount_out, amount, path, self.address, deadline)
receipt = self.uniswap.conn.eth.waitForTransactionReceipt(tx, timeout=2000)
self.assertIsNotNone(receipt)
self.assertTrue(receipt["status"])
class UniswapV2UtilsTest(BaseTest):
def setup(self):
self.w3 = Web3(Web3.HTTPProvider(self.provider, request_kwargs={"timeout": 60}))
def test_sort_tokens_1(self):
token_0, token_1 = UniswapV2Utils.sort_tokens(self.link_token, self.weth_token)
self.assertEqual(token_0, self.link_token)
self.assertEqual(token_1, self.weth_token)
def test_sort_tokens_2(self):
token_0, token_1 = UniswapV2Utils.sort_tokens(self.weth_token, self.link_token)
self.assertEqual(token_0, self.link_token)
self.assertEqual(token_1, self.weth_token)
def test_sort_tokens_equal(self):
with self.assertRaises(AssertionError):
UniswapV2Utils.sort_tokens(self.link_token, self.link_token)
def test_sort_tokens_zero(self):
with self.assertRaises(AssertionError):
UniswapV2Utils.sort_tokens(Web3.toHex(0x0), self.link_token)
def test_pair_for_1(self):
pair = UniswapV2Utils.pair_for(self.factory, self.link_token, self.weth_token)
self.assertEqual(pair, self.link_weth_pair)
def test_pair_for_2(self):
pair = UniswapV2Utils.pair_for(self.factory, self.weth_token, self.link_token)
self.assertEqual(pair, self.link_weth_pair)
def test_get_reserves(self):
pass # TODO
def calculate_quote(self):
pass # TODO
def test_get_amount_out(self):
pass # TODO
def test_get_amount_in(self):
pass # TODO
def test_get_amounts_out(self):
pass # TODO
def test_get_amounts_in(self):
pass # TODO
|
py | b405f64a0bd5206aaa42caa201fabc7322ff67cd | import numpy as np
x = np.array([[42,22,12],[44,53,66]], order='F')
y = x.copy()
x[0,0] = 1001
print(x)
print(y)
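# y was copied before x was modified, so it is an independent array: x prints
# with 1001 in the first slot while y still shows 42. order='F' only affects
# the memory layout (column-major); it does not change the values or the copy.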
|
py | b405f7493fec205724255df2e26a1176c42bf041 | from rest_framework import routers
from django.urls import path
from .views import SleepViewSet
router = routers.DefaultRouter()
router.register(r'api/sleep', SleepViewSet, 'sleep')
urlpatterns = router.urls |
py | b405f75aa63a2cb5180ec609e44cc7386111ecf9 | import matplotlib
import numpy
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
# figure out how to set this
# plt.style.use('seaborn-darkgrid')
matplotlib.rcParams.update({'font.size': 8})
self.fig = matplotlib.figure.Figure(figsize=(width, height), dpi=dpi)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
# not sure if this is necessary
# FigureCanvas.setSizePolicy(self,
# QtWidgets.QSizePolicy.Expanding,
# QtWidgets.QSizePolicy.Expanding)
# FigureCanvas.updateGeometry(self)
class PhysiologicalPlot(MyMplCanvas):
"""Simple canvas with a sine plot."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
super().__init__(parent, width, height, dpi)
self.aspan = None
self.plot = None
self.all_y_values = None
def set_y_values(self, y_values, start_pos, end_pos):
self.all_y_values = y_values
self.start_pos = start_pos
self.end_pos = end_pos
self.update_plot()
def update_plot(self, current_time=0, span=1200, highlight_span=50, full_plot=False):
self.figure.clf()
ax1 = self.figure.add_subplot(111)
if full_plot:
self.draw_full_plot(ax1)
else:
minimum = self.start_pos + current_time - span
maximum = self.start_pos + current_time + span
if minimum < 0:
minimum = 0
plot_y_values = self.all_y_values[minimum:maximum]
y_values_mask = numpy.isfinite(plot_y_values)
if len(plot_y_values) < (maximum - minimum):
maximum = len(plot_y_values) + minimum
x_axis = numpy.arange(minimum, maximum, 1)
self.plot = ax1.plot(x_axis[y_values_mask], plot_y_values[y_values_mask])
# todo evaluate adding a horizontal line for average condition and/or average of entire session
# add backgrounds for condition visualization
task_start = minimum
if self.start_pos > minimum:
task_start = self.start_pos
task_end = maximum
if maximum > self.end_pos:
task_end = self.end_pos
self.aspan = ax1.axvspan(task_start, task_end, color='blue', alpha=0.05)
# highlight current time
self.aspan = ax1.axvspan(self.start_pos + current_time - highlight_span, self.start_pos + current_time + highlight_span, color='red', alpha=0.2)
ax1.set_ylim(min(self.all_y_values), max(self.all_y_values))
self.fig.canvas.draw_idle()
def draw_full_plot(self, ax1):
y_values_mask = numpy.isfinite(self.all_y_values)
x_axis = numpy.arange(0, len(self.all_y_values), 1)
self.plot = ax1.plot(x_axis[y_values_mask], self.all_y_values[y_values_mask])
|
py | b405f7c0922aa0c8996992e8ed51e41c1c772e29 | # Importing the Kratos Library
import KratosMultiphysics
# Import applications
import KratosMultiphysics.FluidDynamicsApplication as KratosCFD
# Import base class file
from KratosMultiphysics.compute_drag_process import ComputeDragProcess
def Factory(settings, model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ComputeEmbeddedDragProcess(model, settings["Parameters"])
class ComputeEmbeddedDragProcess(ComputeDragProcess):
"""
The specific implementation for the output of embedded drag forces
over obstacles in fluid dynamics problems.
"""
def _GetFileHeader(self):
header = '# Embedded drag for model part ' + self.params["model_part_name"].GetString() + '\n'
header += '# Time Fx Fy Fz\n'
return header
def _PrintToScreen(self, result_msg):
KratosMultiphysics.Logger.PrintInfo("ComputeEmbeddedDragProcess","EMBEDDED DRAG RESULTS:")
KratosMultiphysics.Logger.PrintInfo("ComputeEmbeddedDragProcess","Current time: " + result_msg)
def _GetCorrespondingDragForce(self):
return KratosCFD.DragUtilities().CalculateEmbeddedDrag(self.model_part) |
py | b405f8407aec367b8685392e71e8699df854a679 | import pandas as pd
import oneparams.config as config
from datetime import time
from oneparams.utils import get_bool, get_float, get_time
from oneparams.utils import get_cel, wprint, check_email
def check_types(self, data):
excel = self.excel
types = data["types"]
length = data["length"]
if types == "string":
excel = excel.apply(lambda x: check_string(self, x, data, x.name),
axis=1)
elif types == "time":
excel = excel.apply(lambda x: check_time(self, x, data, x.name),
axis=1)
elif types == "float":
excel = excel.apply(lambda x: check_float(self, x, data, x.name),
axis=1)
elif types == "bool":
excel = excel.apply(lambda x: check_bool(self, x, data, x.name),
axis=1)
elif types == "cel":
excel = excel.apply(lambda x: check_cel(self, x, data, x.name), axis=1)
elif types == "email":
excel = excel.apply(lambda x: check_mail(self, x, data, x.name),
axis=1)
if length not in (0, None):
excel = excel.apply(
lambda x: check_length(self, x, data, x.name, length), axis=1)
return excel
def check_string(self, values, data, row):
"""
String type checks.
"""
key = data["key"]
if check_default(self, values, data):
values[key] = data["default"]
return values
values[key] = str(values[key]).strip()
return values
def check_float(self, values, data, row):
"""
Float type checks.
"""
key = data["key"]
value = values[key]
if check_default(self, values, data):
if not pd.notnull(values[key]):
wprint("WARNING! in line {}, Column {}: number used will be {}".format(
self.row(row), key, data["default"]))
values[key] = data["default"]
return values
try:
value = get_float(value)
values[key] = value
except ValueError as exp:
print("ERROR! In line {}, Column {}: {}".format(
self.row(row), key, exp))
self.erros = True
finally:
return values
def check_time(self, values, data, row):
# Before the type check itself, run the standard default check:
# if it returns True the default value is applied and we return early,
# otherwise the type conversion below is carried out.
key = data["key"]
if check_default(self, values, data):
if not pd.notnull(values[key]):
wprint("WARNING! In line {}, Column {}: Time used will be {}".format(
self.row(row), key, data["default"]))
values[key] = data["default"]
return values
value = values[key]
try:
index_value = get_time(value)
value = str(time(*index_value[:3]))
except TypeError as exp:
print("ERROR! In line {}, Column {}: {}".format(
self.row(row), key, exp))
self.erros = True
else:
values[key] = value
return values
def check_bool(self, values, data, row):
"""
Checks whether the value can be converted to a boolean.
Returns the same values with the appropriate changes;
if anything goes wrong, sets self.erros of the Excel class to True.
"""
# Before the type check itself, run the standard default check:
# if it returns True the default value is applied and we return early,
# otherwise the conversion below is carried out.
key = data["key"]
if check_default(self, values, data):
values[key] = data["default"]
return values
value = values[key]
value = str(value).strip()
value = get_bool(value)
if value is None:
print("ERROR! in line {}: not possible change value to bool".format(
self.row(row)))
self.erros = True
values[key] = value
return values
def check_cel(self, values, data, row):
"""
    Phone number checks: strips special characters, keeping digits only;
    if the number is not valid, the field falls back to the column default.
"""
key = data["key"]
value = values[key]
try:
value = get_cel(value)
except ValueError as exp:
if not pd.notnull(value):
wprint(
f'WARNING! in line {self.row(row)}, Column {key}: empty phone')
elif not config.RESOLVE_ERROS:
print(f'ERROR! in line {self.row(row)}, Column {key}: {exp}')
self.erros = True
else:
wprint(
f'WARNING! in line {self.row(row)}: Column {key}: {exp}'
)
value = data["default"]
values[key] = value
return values
def check_mail(self, values, data, row):
key = data["key"]
value = values[key]
if not check_email(value):
if not config.RESOLVE_ERROS:
print(f'ERROR! in line {self.row(row)}: Email {value} not valid')
self.erros = True
else:
wprint(
f'WARNING! in line {self.row(row)}: Email {value} not valid')
value = data["default"]
values[key] = value
return values
def check_length(self, values, data, row, length):
"""
    Checks that the text length does not exceed the maximum allowed (length).
"""
key = data["key"]
if values[key] is None:
return values
if len(values[key]) > length and not config.RESOLVE_ERROS:
print(
f'ERROR! in line {self.row(row)}: Column {key} string {values[key]} size {len(values[key])}/{length}'
)
self.erros = True
elif len(values[key]) > length:
wprint(
f'WARNING: in line {self.row(row)}: Column {key} string {values[key]} size {len(values[key])}/{length}'
)
values[key] = values[key].strip()[:length]
return values
def check_default(self, value, data):
"""
    Checks whether the value is missing or equal to the default value;
    returns True if so, otherwise False.
"""
key = data["key"]
if not pd.notnull(value[key]):
return True
if value[key] == data["default"]:
return True
return False
|
py | b405f8517179bd00a713a73dd1afccd2340b3f2d | '''
Project: PayU Turkey Token v2 Services - Create Token with Ref No Python3 Sample Code
Author: Göktürk Enez
'''
import hmac
import hashlib
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import time
import collections
url = 'https://secure.payu.com.tr/order/token/v2/merchantToken/'
merchant = 'OPU_TEST'
secret = 'SECRET_KEY'
refNo = 57039798
TimeStamp = str(time.time()).split('.')[0]
array = collections.OrderedDict()
array['merchant'] = merchant
array['refNo'] = refNo
array['timestamp'] = TimeStamp
hashstring = ''
for k, v in array.items():
hashstring += str(v)
print(hashstring)
signature = hmac.new(secret.encode('utf-8'), hashstring.encode('utf-8'), hashlib.sha256).hexdigest()
array['signature'] = signature
print(signature)
request = Request(url, urlencode(array).encode())
response = urlopen(request).read().decode()
print(response)
|
py | b405f8807bf799b070a5abfe3eea9429daf46a66 | class DinnerPlates:
def __init__(self, capacity: int):
self.capacity = capacity
self.stack = []
def push(self, val: int) -> None:
        # Append to the rightmost stack if it still has room; otherwise open a new one.
        if self.stack and len(self.stack[-1]) < self.capacity:
            self.stack[-1].append(val)
        else:
            self.stack.append([val])
    def pop(self) -> int:
        return self.popAtStack(-1)
    def popAtStack(self, index: int) -> int:
        # Nothing to pop from a missing or exhausted stack.
        if not self.stack or index >= len(self.stack):
            return -1
        res = self.stack[index].pop()
        if len(self.stack[index]) == 0:
            self.stack.pop(index)
        return res
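# Minimal usage sketch (assumed, consistent with the implementation above):
#   d = DinnerPlates(capacity=2)
#   d.push(1); d.push(2); d.push(3)   # stacks: [[1, 2], [3]]
#   d.popAtStack(0)                   # -> 2, stacks: [[1], [3]]
#   d.pop()                           # -> 3, stacks: [[1]]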
'''
[[373, 86], [395, 306], [370], [41, 17], [387], [66], [27], [252, 6], [269, 231], [35, 346]]
[[373, 86], [395, 306], [41, 17], [387], [66], [27], [252, 6], [269, 231], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269, 231], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269], [35]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134], [1, 250], [19]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134], [1, 250]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134], [1]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11]]
''' |
py | b405f99cd7442a2261fd29a3b1eb12d348df694c | from base import DataSourceCollection, DataSourceBase
# we have to import these modules for them to register as valid data sources
import cfn_datasource
import yaml_datasource
import file_datasource
__all__ = ['DataSourceBase', 'DataSourceCollection']
|
py | b405fa4be52f55a302d279260d6f98b2954c1724 | import numpy as np
try:
from matplotlib.path import Path
except ImportError:
Path = None
from .grid import Grid, CachedData
from ..utils.geometry import is_clockwise
class VertexGrid(Grid):
"""
class for a vertex model grid
Parameters
----------
vertices
list of vertices that make up the grid
cell2d
list of cells and their vertices
Properties
----------
vertices
returns list of vertices that make up the grid
cell2d
returns list of cells and their vertices
Methods
----------
get_cell_vertices(cellid)
returns vertices for a single cell at cellid.
"""
def __init__(self, vertices=None, cell2d=None, top=None,
botm=None, idomain=None, lenuni=None, epsg=None, proj4=None,
prj=None, xoff=0.0, yoff=0.0, angrot=0.0,
nlay=None, ncpl=None, cell1d=None):
super(VertexGrid, self).__init__('vertex', top, botm, idomain, lenuni,
epsg, proj4, prj, xoff, yoff, angrot)
self._vertices = vertices
self._cell1d = cell1d
self._cell2d = cell2d
self._top = top
self._botm = botm
self._idomain = idomain
if botm is None:
self._nlay = nlay
self._ncpl = ncpl
else:
self._nlay = None
self._ncpl = None
@property
def is_valid(self):
if self._vertices is not None and (self._cell2d is not None or
self._cell1d is not None):
return True
return False
@property
def is_complete(self):
if self._vertices is not None and (self._cell2d is not None or
self._cell1d is not None) and \
super(VertexGrid, self).is_complete:
return True
return False
@property
def nlay(self):
if self._cell1d is not None:
return 1
elif self._botm is not None:
return len(self._botm)
else:
return self._nlay
@property
def ncpl(self):
if self._cell1d is not None:
return len(self._cell1d)
if self._botm is not None:
return len(self._botm[0])
else:
return self._ncpl
@property
def nnodes(self):
return self.nlay * self.ncpl
@property
def shape(self):
return self.nlay, self.ncpl
@property
def extent(self):
self._copy_cache = False
xvertices = np.hstack(self.xvertices)
yvertices = np.hstack(self.yvertices)
self._copy_cache = True
return (np.min(xvertices),
np.max(xvertices),
np.min(yvertices),
np.max(yvertices))
@property
def grid_lines(self):
"""
Creates a series of grid line vertices for drawing
a model grid line collection
Returns:
list: grid line vertices
"""
self._copy_cache = False
xgrid = self.xvertices
ygrid = self.yvertices
lines = []
for ncell, verts in enumerate(xgrid):
for ix, vert in enumerate(verts):
lines.append([(xgrid[ncell][ix - 1], ygrid[ncell][ix - 1]),
(xgrid[ncell][ix], ygrid[ncell][ix])])
self._copy_cache = True
return lines
@property
def xyzcellcenters(self):
"""
Method to get cell centers and set to grid
"""
cache_index = 'cellcenters'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
self._build_grid_geometry_info()
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
@property
def xyzvertices(self):
"""
Method to get all grid vertices in a layer, arranged per cell
Returns:
list of size sum(nvertices per cell)
"""
cache_index = 'xyzgrid'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
self._build_grid_geometry_info()
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
def intersect(self, x, y, local=False, forgive=False):
"""
Get the CELL2D number of a point with coordinates x and y
When the point is on the edge of two cells, the cell with the lowest
CELL2D number is returned.
Parameters
----------
x : float
The x-coordinate of the requested point
y : float
The y-coordinate of the requested point
local: bool (optional)
If True, x and y are in local coordinates (defaults to False)
forgive: bool (optional)
Forgive x,y arguments that fall outside the model grid and
return NaNs instead (defaults to False - will throw exception)
Returns
-------
icell2d : int
The CELL2D number
"""
if Path is None:
s = 'Could not import matplotlib. Must install matplotlib ' + \
' in order to use VertexGrid.intersect() method'
raise ImportError(s)
if local:
# transform x and y to real-world coordinates
x, y = super(VertexGrid, self).get_coords(x,y)
xv, yv, zv = self.xyzvertices
for icell2d in range(self.ncpl):
xa = np.array(xv[icell2d])
ya = np.array(yv[icell2d])
# x and y at least have to be within the bounding box of the cell
if np.any(x <= xa) and np.any(x >= xa) and \
np.any(y <= ya) and np.any(y >= ya):
path = Path(np.stack((xa, ya)).transpose())
# use a small radius, so that the edge of the cell is included
if is_clockwise(xa, ya):
radius = -1e-9
else:
radius = 1e-9
if path.contains_point((x, y), radius=radius):
return icell2d
if forgive:
icell2d = np.nan
return icell2d
raise Exception('x, y point given is outside of the model area')
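        # Illustration with assumed values: for a grid whose CELL2D 0 spans
        # x in [0, 100] and y in [0, 100], grid.intersect(50.0, 50.0) returns 0,
        # while grid.intersect(1e6, 1e6, forgive=True) returns np.nan instead of raising.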
def get_cell_vertices(self, cellid):
"""
Method to get a set of cell vertices for a single cell
used in the Shapefile export utilities
:param cellid: (int) cellid number
:return: list of x,y cell vertices
"""
self._copy_cache = False
cell_verts = list(zip(self.xvertices[cellid],
self.yvertices[cellid]))
self._copy_cache = True
return cell_verts
def plot(self, **kwargs):
"""
Plot the grid lines.
Parameters
----------
kwargs : ax, colors. The remaining kwargs are passed into the
the LineCollection constructor.
Returns
-------
lc : matplotlib.collections.LineCollection
"""
from flopy.plot import PlotMapView
mm = PlotMapView(modelgrid=self)
return mm.plot_grid(**kwargs)
def _build_grid_geometry_info(self):
cache_index_cc = 'cellcenters'
cache_index_vert = 'xyzgrid'
xcenters = []
ycenters = []
xvertices = []
yvertices = []
if self._cell1d is not None:
zcenters = []
zvertices = []
vertexdict = {v[0]: [v[1], v[2], v[3]]
for v in self._vertices}
for cell1d in self._cell1d:
cell1d = tuple(cell1d)
xcenters.append(cell1d[1])
ycenters.append(cell1d[2])
zcenters.append(cell1d[3])
vert_number = []
for i in cell1d[3:]:
if i is not None:
vert_number.append(int(i))
xcellvert = []
ycellvert = []
zcellvert = []
for ix in vert_number:
xcellvert.append(vertexdict[ix][0])
ycellvert.append(vertexdict[ix][1])
zcellvert.append(vertexdict[ix][2])
xvertices.append(xcellvert)
yvertices.append(ycellvert)
zvertices.append(zcellvert)
else:
vertexdict = {v[0]: [v[1], v[2]]
for v in self._vertices}
# build xy vertex and cell center info
for cell2d in self._cell2d:
cell2d = tuple(cell2d)
xcenters.append(cell2d[1])
ycenters.append(cell2d[2])
vert_number = []
for i in cell2d[4:]:
if i is not None:
vert_number.append(int(i))
xcellvert = []
ycellvert = []
for ix in vert_number:
xcellvert.append(vertexdict[ix][0])
ycellvert.append(vertexdict[ix][1])
xvertices.append(xcellvert)
yvertices.append(ycellvert)
# build z cell centers
zvertices, zcenters = self._zcoords()
if self._has_ref_coordinates:
# transform x and y
xcenters, ycenters = self.get_coords(xcenters, ycenters)
xvertxform = []
yvertxform = []
# vertices are a list within a list
for xcellvertices, ycellvertices in zip(xvertices, yvertices):
xcellvertices, \
ycellvertices = self.get_coords(xcellvertices,
ycellvertices)
xvertxform.append(xcellvertices)
yvertxform.append(ycellvertices)
xvertices = xvertxform
yvertices = yvertxform
self._cache_dict[cache_index_cc] = CachedData([xcenters,
ycenters,
zcenters])
self._cache_dict[cache_index_vert] = CachedData([xvertices,
yvertices,
zvertices])
if __name__ == "__main__":
import os
import flopy as fp
ws = "../../examples/data/mf6/test003_gwfs_disv"
name = "mfsim.nam"
sim = fp.mf6.modflow.MFSimulation.load(sim_name=name, sim_ws=ws)
print(sim.model_names)
ml = sim.get_model("gwf_1")
dis = ml.dis
t = VertexGrid(dis.vertices.array, dis.cell2d.array, top=dis.top.array,
botm=dis.botm.array, idomain=dis.idomain.array,
epsg=26715, xoff=0, yoff=0, angrot=45)
sr_x = t.xvertices
sr_y = t.yvertices
sr_xc = t.xcellcenters
sr_yc = t.ycellcenters
sr_lc = t.grid_lines
sr_e = t.extent
t.use_ref_coords = False
x = t.xvertices
y = t.yvertices
z = t.zvertices
xc = t.xcellcenters
yc = t.ycellcenters
zc = t.zcellcenters
lc = t.grid_lines
e = t.extent
|
py | b405fa5559137871c09e8d2ec3bf11885c55cdb9 | import os
from datetime import datetime
import gzip
from cStringIO import StringIO
import requests
from csvkit.unicsv import UnicodeCSVReader
from sqlalchemy import Boolean, Float, Date, String, Column, \
Integer, Table, text, func, select, and_, cast, UniqueConstraint, \
join, outerjoin, BigInteger, MetaData
from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, TIME
from sqlalchemy.exc import NoSuchTableError
from geoalchemy2.shape import from_shape
from shapely.geometry import box
from boto.s3.connection import S3Connection, S3ResponseError
from boto.s3.key import Key
from ede.database import task_session as session, task_engine as engine
from ede.models import MetaTable, MasterTable
from ede.utils.helpers import slugify, iter_column
from ede.settings import AWS_ACCESS_KEY, AWS_SECRET_KEY, S3_BUCKET, DATA_DIR
COL_TYPES = {
'boolean': Boolean,
'integer': Integer,
'big_integer': BigInteger,
'float': Float,
'string': String,
'date': Date,
'time': TIME,
'timestamp': TIMESTAMP,
'datetime': TIMESTAMP,
}
class EDE_ETLError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class EDE_ETL(object):
def __init__(self, meta, data_types=None):
"""
Initializes with a dictionary representation of a
row from the meta_master table. If you include
keys for all of the columns in the meta_master
table, it doesn't hurt anything but the only keys
that are required are:
dataset_name: Machine version of the dataset name.
This is used to name the primary key field of the
data table for the dataset as well as the table
itself. Should be lowercase with words seperated
by underscores. Truncated to the first 50
characters.
source_url: This is used to download the raw data.
business_key: Name of the user identified business key from the
source data. AKA unique ID.
observed_date: Name of the user identified observed date column
from the source data
latitude: Name of the user identified latitude column
from the source data
        longitude: Name of the user identified longitude column
from the source data
location: Name of the user identified location column from
from the source data. The values in this column
should be formatted like so
"(<latitude decimal degrees>, <longitude decimal degrees>)"
You can also optionally supply a list of dicts with the names of the fields
from the source data and the data type of the fields like so:
[
{
'field_name': 'A field name',
'data_type': 'integer',
},
{
'field_name': 'Another field name',
'data_type': 'string',
},
{
'field_name': 'Last field name',
'data_type': 'float'
},
]
'data_type' can be one of
'boolean'
'integer'
'big_integer'
'float'
'string'
'date'
'time'
'timestamp'
"""
# Add init parameters to EDE_ETL object
for k, v in meta.items():
setattr(self, k, v)
if data_types:
self.data_types = data_types
self.s3_key = None
self.metadata = MetaData()
# AWS_ACCESS_KEY as empty string is signal to operate locally.
if AWS_ACCESS_KEY != '':
# Name of file in S3 bucket will be dataset name appended with current time.
s3_path = '%s/%s.csv.gz' % (self.dataset_name,
datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
try:
s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
bucket = s3conn.get_bucket(S3_BUCKET)
self.s3_key = Key(bucket)
self.s3_key.key = s3_path
except S3ResponseError, e:
# XX: When this happens, we should log a more serious message
print "Failed to connect to S3 for filename '%s', trying to init locally" % self.dataset_name
self._init_local(self.dataset_name)
else:
self._init_local(self.dataset_name)
def _init_local(self, dataset_name):
"""
Set directory to download and process data file as DATA_DIR/dataset_name.csv.gz
"""
print "EDE_ETL._init_local('%s')" % dataset_name
self.fname = '%s.csv.gz' % dataset_name
self.data_dir = DATA_DIR
def add(self, s3_path=None):
        if s3_path and self.s3_key:
            # Reuse a copy of the source file that is already on S3.
            self.s3_key.key = s3_path
else:
self._download_csv()
self._get_or_create_data_table()
self._make_src_table()
self._insert_src_data()
self._make_new_and_dup_table()
self._find_dup_data()
self._insert_new_data(added=True)
self._insert_data_table()
self._update_master()
self._update_meta(added=True)
self._update_geotags()
self._cleanup_temp_tables()
def update(self, s3_path=None):
        if s3_path and self.s3_key:
self.s3_key.key = s3_path
else:
self._download_csv()
self._get_or_create_data_table()
self._make_src_table()
self._insert_src_data()
self._make_new_and_dup_table()
self._find_dup_data()
new = self._insert_new_data()
if new:
self._insert_data_table()
self._update_master()
self._update_meta()
self._update_geotags()
self._cleanup_temp_tables()
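    # Hypothetical usage sketch (field names are illustrative, not taken from this
    # repository):
    #   meta = {
    #       'dataset_name': 'crimes_2001_to_present',
    #       'source_url': 'http://example.com/crimes.csv',
    #       'source_url_hash': 'abc123',
    #       'business_key': 'ID',
    #       'observed_date': 'Date',
    #       'latitude': 'Latitude', 'longitude': 'Longitude', 'location': None,
    #   }
    #   EDE_ETL(meta).add()      # first load: creates dat_/src_ tables and fills dat_master
    #   EDE_ETL(meta).update()   # later runs: only new (business_key, dup_ver) rows are inserted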
def _download_csv(self):
"""
If self.s3_key is set, download CSV to S3 bucket.
Else, download to local directory.
"""
r = requests.get(self.source_url, stream=True)
if self.s3_key:
s = StringIO()
with gzip.GzipFile(fileobj=s, mode='wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
s.seek(0)
self.s3_key.set_contents_from_file(s)
self.s3_key.make_public()
else:
# write out a la shapefile_helpers
self.fpath = os.path.join(self.data_dir, self.fname)
# If file already exists locally, don't perform copy.
if not os.path.exists(self.fpath):
gz_f = gzip.open(self.fpath, 'wb')
for chunk in r.iter_content(chunk_size=1024):
if chunk:
gz_f.write(chunk)
gz_f.flush()
gz_f.close() # Explicitly close before re-opening to read.
def _cleanup_temp_tables(self):
self.src_table.drop(bind=engine, checkfirst=True)
self.new_table.drop(bind=engine, checkfirst=True)
self.dup_table.drop(bind=engine, checkfirst=True)
try:
self.chg_table.drop(bind=engine, checkfirst=True)
except AttributeError:
pass
def _get_or_create_data_table(self):
"""
Step One: Make a table where the data will eventually live
True after this function: self.dat_table refers to a (possibly empty)
table in the database.
"""
try: # Maybe this table already exists in the database.
self.dat_table = Table('dat_%s' % self.dataset_name, self.metadata,
autoload=True, autoload_with=engine, extend_existing=True)
except NoSuchTableError: # Nope, we'll need to create it.
s = StringIO()
# If reading from AWS...
if self.s3_key:
# ...dump the contents into s.
self.s3_key.get_contents_to_file(s)
# If reading locally...
else:
# ... read the file out of DATA_DIR.
with open(self.fpath, 'r') as f:
s.write(f.read())
# Go to start of file.
s.seek(0)
# Find out what types of columns we'll need to store the data.
with gzip.GzipFile(fileobj=s, mode='rb') as f:
reader = UnicodeCSVReader(f)
header = map(slugify, reader.next())
col_types = [] # Will be list of pairs: (column_type, is_nullable)
try: # Were data_types specified at init?
types = getattr(self, 'data_types')
col_map = {c['field_name']: c['data_type'] for c in types}
for col in header:
t = col_map[col]
col_types.append((COL_TYPES[t], True)) # always nullable
except AttributeError: # Try to infer column types.
for col in range(len(header)):
col_types.append(iter_column(col, f))
# Create rows that will be used to keep track of the version of the source dataset
# that each particular row came from.
cols = [
Column('%s_row_id' % self.dataset_name, Integer, primary_key=True),
Column('start_date', TIMESTAMP, server_default=text('CURRENT_TIMESTAMP')),
Column('end_date', TIMESTAMP, server_default=text('NULL')),
Column('current_flag', Boolean, server_default=text('TRUE')),
Column('dup_ver', Integer)
]
# Generate columns for each column in the source dataset.
for col_name, d_type in zip(header, col_types):
dt, nullable = d_type
cols.append(Column(col_name, dt, nullable=nullable))
# Final column has columns whose values must be unique.
# Generated from business_key, dup_ver, and dataset_name.
cols.append(UniqueConstraint(slugify(self.business_key), 'dup_ver',
name='%s_ix' % self.dataset_name[:50]))
# Assemble data table from the columns...
self.dat_table = Table('dat_%s' % self.dataset_name, self.metadata,
*cols, extend_existing=True)
# ... and load it into the database.
self.dat_table.create(engine, checkfirst=True)
def _make_src_table(self):
"""
Step Two
Creates a table that is a simple copy of the source data.
(That is, it doesn't have the fancy extra columns like start_date that dat_table has)
True after this function: self.src_table refers to an empty table in the database.
"""
cols = []
skip_cols = ['%s_row_id' % self.dataset_name, 'start_date', 'end_date', 'current_flag', 'dup_ver']
for col in self.dat_table.columns:
if col.name not in skip_cols:
                # Is there a default value that we need to give the column?
kwargs = {}
if col.server_default:
kwargs['server_default'] = col.server_default
# Add the column as it was in dat_table
cols.append(Column(col.name, col.type, **kwargs))
# Use the source CSV's line numbers as primary key.
cols.append(Column('line_num', Integer, primary_key=True))
self.src_table = Table('src_%s' % self.dataset_name, self.metadata,
*cols, extend_existing=True)
# If there is an old version of this raw dataset in the DB, kick it out.
self.src_table.drop(bind=engine, checkfirst=True)
# Add the table to the database.
self.src_table.create(bind=engine)
def _insert_src_data(self):
"""
Step Three: Insert data directly from CSV
True after this function: self.src_data is populated with the original dataset's values
or a EDE_ETLError is triggered that brings the process to a halt.
"""
skip_cols = ['line_num']
names = [c.name for c in self.src_table.columns]
# Create the COPY statement... creatively.
copy_st = 'COPY src_%s (' % self.dataset_name
for idx, name in enumerate(names):
if name not in skip_cols:
if idx < len(names) - len(skip_cols) - 1:
copy_st += '%s, ' % name
else:
copy_st += '%s)' % name
else:
copy_st += "FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ',')"
# Load the raw file from S3 or a local drive.
s = StringIO()
if self.s3_key:
self.s3_key.get_contents_to_file(s)
else:
with open(self.fpath, 'r') as f:
s.write(f.read())
# Dump the contents into the src_table we've created.
s.seek(0)
conn = engine.raw_connection()
with gzip.GzipFile(fileobj=s, mode='rb') as f:
try:
cursor = conn.cursor()
cursor.copy_expert(copy_st, f)
cursor.close()
conn.commit()
except Exception, e: # When the bulk copy fails on _any_ row, roll back the entire operation.
conn.rollback()
raise EDE_ETLError(e)
finally:
conn.close()
# The following code sets lat/lng to NULL when the given coordinate is (0,0) (e.g. off the coast of Africa).
# This was a problem for: http://plenario-dev.s3.amazonaws.com/sfpd_incident_all_datetime.csv
if self.latitude and self.longitude:
upd_st = """
UPDATE src_%s SET %s = NULL , %s = NULL FROM
(SELECT %s FROM src_%s WHERE %s=0 and %s =0) AS ids
WHERE src_%s.%s=ids.%s
""" % (self.dataset_name,
slugify(self.latitude), slugify(self.longitude),
slugify(self.business_key), self.dataset_name, slugify(self.latitude), slugify(self.longitude),
self.dataset_name, slugify(self.business_key), slugify(self.business_key))
with engine.begin() as conn:
conn.execute(upd_st)
elif self.location:
upd_st = """
UPDATE src_%s
SET %s=NULL FROM
(select %s, FLOAT8((regexp_matches(%s, '\((.*),(.*)\)'))[1]) as l1,
FLOAT8((regexp_matches(%s, '\((.*),(.*)\)'))[2]) as l2
from src_%s) as foo
WHERE foo.l1=0 and foo.l2 = 0 AND src_%s.%s = foo.%s
""" % (self.dataset_name,
slugify(self.location),
slugify(self.business_key),
slugify(self.location),
slugify(self.location),
self.dataset_name,
self.dataset_name, slugify(self.business_key), slugify(self.business_key))
with engine.begin() as conn:
conn.execute(upd_st)
# Also need to remove rows that have an empty business key
# There might be a better way to do this...
del_st = """
DELETE FROM src_%s WHERE %s IS NULL
""" % (self.dataset_name, slugify(self.business_key))
with engine.begin() as conn:
conn.execute(del_st)
def _make_new_and_dup_table(self):
"""
True after this function: self.new_table and self.dup_table
are created with columns for line_num, dup_ver, and business_key.
"""
# Grab the data table's business key column.
bk_col = self.dat_table.c[slugify(self.business_key)]
# TODO: DRY
cols = [
Column(slugify(self.business_key), bk_col.type, primary_key=True),
Column('line_num', Integer),
Column('dup_ver', Integer, primary_key=True)
]
self.new_table = Table('new_%s' % self.dataset_name, self.metadata,
*cols, extend_existing=True)
self.new_table.drop(bind=engine, checkfirst=True)
self.new_table.create(bind=engine)
cols = [
Column(slugify(self.business_key), bk_col.type, primary_key=True),
Column('line_num', Integer),
Column('dup_ver', Integer, primary_key=True)
]
self.dup_table = Table('dup_%s' % self.dataset_name, self.metadata,
*cols, extend_existing=True)
self.dup_table.drop(bind=engine, checkfirst=True)
self.dup_table.create(bind=engine)
def _find_dup_data(self):
"""
Step Five
Construct dup_ver column of dup_table and populate dup_table.
"""
# Taking the business key and line numbers of the source data...
cols = [
self.src_table.c[slugify(self.business_key)],
self.src_table.c['line_num'],
]
cols.append(func.rank()
# ... group by business key,
.over(partition_by=getattr(self.src_table.c, slugify(self.business_key)),
# ... and rank by line number.
order_by=self.src_table.columns['line_num'].desc())
# Call this our dup_ver column.
.label('dup_ver'))
# Make these three columns our dup_table.
sel = select(cols, from_obj=self.src_table)
ins = self.dup_table.insert()\
.from_select(self.dup_table.columns, sel)
with engine.begin() as conn:
conn.execute(ins)
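        # The INSERT ... SELECT built above is roughly equivalent to this SQL
        # (illustration only; table and key names depend on the dataset):
        #   INSERT INTO dup_<dataset> (<business_key>, line_num, dup_ver)
        #   SELECT <business_key>, line_num,
        #          rank() OVER (PARTITION BY <business_key> ORDER BY line_num DESC)
        #   FROM src_<dataset>;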
def _insert_new_data(self, added=False):
"""
Step Six
Find which rows in dup_table aren't present in dat_table.
Add those new rows to new_table.
"""
bk = slugify(self.business_key)
# Align on line_num and bk (Shouldn't that include every entry of both tables?)
j = join(self.src_table, self.dup_table,
and_(self.src_table.c['line_num'] == self.dup_table.c['line_num'],
self.src_table.c[bk] == self.dup_table.c[bk]))
dup_tablename = self.dup_table.name
# Where possible, find where bk's and dup_ver's line up
outer = outerjoin(j, self.dat_table,
and_(self.dat_table.c[bk] == j.c['%s_%s' % (dup_tablename, bk)],
self.dat_table.c['dup_ver'] == j.c['%s_dup_ver' % dup_tablename]))
sel_cols = [
self.src_table.c[bk],
self.src_table.c['line_num'],
self.dup_table.c['dup_ver']
]
        # If we are adding this dataset for the first time, bring in all of the dup_ver info
sel = select(sel_cols).select_from(outer)
if not added: # If we are updating, (not adding)
# only grab the dup_ver info not found in dat_table
sel = sel.where(self.dat_table.c['%s_row_id' % self.dataset_name] == None)
# Insert the new dup_ver info into new_table.
ins = self.new_table.insert()\
.from_select([c for c in self.new_table.columns], sel)
try:
with engine.begin() as conn:
conn.execute(ins)
return True
except TypeError:
# There are no new records
return False
def _insert_data_table(self):
"""
Step Seven
Insert the new data we identified in new_table into dat_table
by joining the references in new_table to the actual data living in src_table.
"""
# Take all columns from src_table (excluding most of the 'meta' columns)
skip_cols = ['%s_row_id' % self.dataset_name, 'end_date', 'current_flag', 'line_num']
from_vals = []
from_vals.append(text("'%s' AS start_date" % datetime.now().isoformat()))
from_vals.append(self.new_table.c.dup_ver)
for c_src in self.src_table.columns:
if c_src.name not in skip_cols:
from_vals.append(c_src)
sel = select(from_vals, from_obj=self.src_table)
bk = slugify(self.business_key)
ins = self.dat_table.insert()\
.from_select(
[c for c in self.dat_table.columns if c.name not in skip_cols],
sel.select_from(self.src_table.join(self.new_table,
and_(
self.src_table.c.line_num == self.new_table.c.line_num,
getattr(self.src_table.c, bk) == getattr(self.new_table.c, bk),
)
))
)
with engine.begin() as conn:
conn.execute(ins)
def _update_master(self, added=False):
"""
Step Eight: Insert new records into master table
"""
# Enumerate all the columns we'll be populating from dat_table
dat_cols = [
self.dat_table.c.start_date,
self.dat_table.c.end_date,
self.dat_table.c.current_flag,
]
if self.location:
dat_cols.append(getattr(self.dat_table.c, slugify(self.location))\
.label('location'))
else:
dat_cols.append(text("NULL as location"))
if self.latitude and self.longitude:
dat_cols.append(getattr(self.dat_table.c, slugify(self.latitude))\
.label('latitude'))
dat_cols.append(getattr(self.dat_table.c, slugify(self.longitude))\
.label('longitude'))
else:
dat_cols.append(text("NULL AS latitude"))
dat_cols.append(text("NULL AS longitude"))
dat_cols.append(func.cast(getattr(self.dat_table.c, slugify(self.observed_date)), TIMESTAMP)\
.label('obs_date'))
dat_cols.append(text("NULL AS weather_station_id"))
dat_cols.append(text("NULL AS geotag2"))
dat_cols.append(text("NULL AS geotag3"))
dat_cols.append(text("'%s' AS dataset_name" % self.dataset_name))
dat_pk = '%s_row_id' % self.dataset_name
dat_cols.append(getattr(self.dat_table.c, dat_pk))
# Derive point in space from either lat/long columns or single location column
if self.latitude and self.longitude:
dat_cols.append(text(
"ST_PointFromText('POINT(' || dat_%s.%s || ' ' || dat_%s.%s || ')', 4326) \
as location_geom" % (
self.dataset_name, slugify(self.longitude),
self.dataset_name, slugify(self.latitude),
)))
elif self.location:
dat_cols.append(text(
""" (
SELECT ST_PointFromText('POINT(' || subq.longitude || ' ' || subq.latitude || ')', 4326) \
FROM (
SELECT FLOAT8((regexp_matches(%s, '\((.*),.*\)'))[1]) AS latitude, \
FLOAT8((regexp_matches(%s, '\(.*,(.*)\)'))[1]) AS longitude \
FROM dat_%s as d where d."%s" = dat_%s."%s") AS subq) AS location_geom
""" %
(
slugify(self.location), slugify(self.location),
self.dataset_name, dat_pk, self.dataset_name, dat_pk,
)))
# Insert the data
mt = MasterTable.__table__
bk = slugify(self.business_key)
# If we're adding the dataset for the first time,
if added:
# just throw everything in.
ins = mt.insert()\
.from_select(
[c for c in mt.columns.keys() if c != 'master_row_id'],
select(dat_cols)
)
else: # If we're updating,
# just add the new stuff by joining on new_table.
ins = mt.insert()\
.from_select(
[c for c in mt.columns.keys() if c != 'master_row_id'],
select(dat_cols)\
.select_from(self.dat_table.join(self.new_table,
and_(
getattr(self.dat_table.c, bk) == getattr(self.new_table.c, bk),
self.dat_table.c.dup_ver == self.new_table.c.dup_ver
)
)
)
)
with engine.begin() as conn:
conn.execute(ins)
def _add_weather_info(self):
"""
This is just adding the weather observation id to the master table right now.
In the future we can modify it to do all the geo tagging we need for the
master table.
The update below assumes the weather stations table has already been
created and populated. I have no idea how to do it in SQLAlchemy,
mainly because of the geometry distance operator ('<->')
"""
# Yo dawg, I heard you like subqueries.
# I put a subquery in your subquery.
date_type = str(getattr(self.dat_table.c, slugify(self.observed_date)).type)
if 'timestamp' in date_type.lower():
weather_table = 'dat_weather_observations_hourly'
date_col_name = 'datetime'
temp_col = 'drybulb_fahrenheit'
else:
weather_table = 'dat_weather_observations_daily'
date_col_name = 'date'
temp_col = 'temp_avg'
upd = text(
"""
UPDATE dat_master SET weather_observation_id=subq.weather_id
FROM (
SELECT DISTINCT ON (d.master_row_id)
d.master_row_id AS master_id,
w.id as weather_id,
abs(extract(epoch from d.obs_date) - extract(epoch from w.%s)) as diff
FROM dat_master AS d
JOIN %s as w
ON w.wban_code = (
SELECT b.wban_code
FROM weather_stations AS b
ORDER BY d.location_geom <-> b.location LIMIT 1
)
WHERE d.location_geom IS NOT NULL
AND d.weather_observation_id IS NULL
AND d.dataset_name = :dname
AND d.obs_date > (
SELECT MIN(%s)
FROM %s
WHERE %s IS NOT NULL
)
AND d.obs_date < (
SELECT MAX(%s)
FROM %s
WHERE %s IS NOT NULL
)
ORDER BY d.master_row_id, diff
) as subq
WHERE dat_master.master_row_id = subq.master_id
""" % (date_col_name, weather_table,
date_col_name, weather_table, temp_col,
date_col_name, weather_table, temp_col,)
)
with engine.begin() as conn:
conn.execute(upd, dname=self.dataset_name)
def _add_census_block(self):
"""
Adds a census block geoid to entries in the master table
"""
upd = text("""
UPDATE dat_master SET census_block=subq.census_block
FROM (
SELECT
d.master_row_id as master_id,
c.geoid10 as census_block
FROM
dat_master as d
JOIN census_blocks as c
ON ST_Within(d.location_geom, c.geom)
WHERE d.census_block IS NULL
AND d.location_geom IS NOT NULL
AND d.dataset_name = :dname
) as subq
WHERE dat_master.master_row_id = subq.master_id
""")
with engine.begin() as conn:
conn.execute(upd, dname=self.dataset_name)
def _update_geotags(self):
# self._add_weather_info()
# self._add_weather_stations()
self._add_census_block()
def _update_meta(self, added=False):
"""
Update the meta_master table with obs_from, obs_to,
updated_date, bbox, and (when appropriate) date_added
"""
md = session.query(MetaTable)\
.filter(MetaTable.source_url_hash == self.source_url_hash)\
.first()
# Update time columns
now = datetime.now()
md.last_update = now
if added:
md.date_added = now
obs_date_col = getattr(self.dat_table.c, slugify(self.observed_date))
obs_from, obs_to = session.query(
func.min(obs_date_col),
func.max(obs_date_col))\
.first()
md.obs_from = obs_from
md.obs_to = obs_to
# Calculate bounding box
if self.latitude and self.longitude:
lat_col = getattr(self.dat_table.c, slugify(self.latitude))
lon_col = getattr(self.dat_table.c, slugify(self.longitude))
xmin, ymin, xmax, ymax = session.query(
func.min(lon_col),
func.min(lat_col),
func.max(lon_col),
func.max(lat_col))\
.first()
elif self.location:
loc_col = getattr(self.dat_table.c, slugify(self.location))
subq = session.query(
cast(func.regexp_matches(loc_col, '\((.*),.*\)'),
ARRAY(Float)).label('lat'),
cast(func.regexp_matches(loc_col, '\(.*,(.*)\)'),
ARRAY(Float)).label('lon'))\
.subquery()
try:
xmin, ymin, xmax, ymax = session.query(func.min(subq.c.lon),
func.min(subq.c.lat),
func.max(subq.c.lon),
func.max(subq.c.lat))\
.first()
xmin, ymin, xmax, ymax = xmin[0], ymin[0], xmax[0], ymax[0]
except:
session.rollback()
xmin, ymin, xmax, ymax = 0, 0, 0, 0
bbox = from_shape(box(xmin, ymin, xmax, ymax), srid=4326)
md.bbox = bbox
try:
session.add(md)
session.commit()
except:
session.rollback()
session.add(md)
session.commit()
|
py | b405fafaff331475f09744bd8d1291fd67e44a82 | #!/usr/bin/python
import os
import subprocess
repo_name = "operepo"
tag = "release"
save_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "volumes/app_images")
# Ensure the app_images folder exists
try:
os.makedirs(save_path)
except:
pass
# os.makedirs raises if the folder already exists; that error is intentionally ignored above
# Get the digest of the docker image
def get_app_digest(app_name):
global save_path, repo_name, tag
proc = subprocess.Popen(["/usr/bin/docker", "images", repo_name + "/" + app_name + ":" + tag], stdout=subprocess.PIPE)
lines = proc.stdout.readlines()
ret = "..."
for line in lines:
# Go through each line and find the digest for the latest tagged item
parts = line.split()
if parts[0] == repo_name + "/" + app_name and parts[1] == tag:
#print "\tFound digest: " + parts[2] + " for: " + repo_name + "/" + app_name + ":" + tag
ret = parts[2]
return ret
def save_app_digest(app_name, digest):
global save_path, repo_name
# Store the digest in the app path
digest_path = os.path.join(save_path, app_name + ".digest")
try:
f = open(digest_path, "w")
f.write(digest)
f.close()
except:
# Unable to save?
pass
def get_tar_digest(app_name):
global save_path, repo_name
tar_digest = "."
digest_path = os.path.join(save_path, app_name + ".digest")
# Open the file and read the digest of the currently saved tar file
try:
f = open(digest_path, "r")
tmp = f.read().strip()
if tmp != "":
tar_digest = tmp
f.close()
except:
# Unable to load last digest, just leave it empty
tar_digest = "."
return tar_digest
def load_app(app_name):
global save_path, repo_name, tag
img_path = os.path.join(save_path, app_name + ".tar.gz")
# Load the last saved tar digest
tar_digest = get_tar_digest(app_name)
# Load the current docker digest
app_digest = get_app_digest(app_name)
print "Digests (tar/app): " + tar_digest + "/" + app_digest
if app_digest != tar_digest or app_digest == "..." or tar_digest == "..":
# Save the binary
print "\tApp modified, importing with docker load from: " + img_path
os.system("docker load -i " + img_path)
# Update the digest
app_digest = get_app_digest(app_name)
save_app_digest(app_name, app_digest)
else:
# App hasn't changed
print "\tApp hasn't changed, skipping."
def processFolder(cwd=""):
global save_path, repo_name
ret = ""
if (os.path.isdir(cwd) != True):
#print "Not a folder, skipping..."
return ret
enabled = os.path.join(cwd, ".enabled")
if (os.path.isfile(enabled) != True):
#print "Not enabled, skipping " + cwd
return ret
dname = os.path.basename(cwd)
print "============================================"
print " Processing Image " + dname
print "============================================"
load_app(dname)
return ret
# Find enabled images and export them to the images folder
# def processFolder(cwd=""):
# global save_path, repo_name
# ret = ""
# if (os.path.isfile(cwd) != True):
#print "Not a file, skipping..."
# return ret
# dname = os.path.basename(cwd)
# if (dname.startswith("ope-")):
# print "\t============================================"
# print "\tProcessing Image " + dname
# print "\t============================================"
# img_path = os.path.join(save_path, dname)
# os.system("docker load -i " + img_path)
# return ret
if __name__ == "__main__":
# Loop through the folders and find containers with .enabled files.
pwd = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "docker_build_files")
for folder in os.listdir(pwd):
processFolder(os.path.join(pwd, folder))
|
py | b405fcbf8404bdd419b17868402541d85460e678 | from django.contrib import admin
from . models import Student
# Register your models here.
admin.site.register(Student) |
py | b405fcf09bc19e15ebb7b865b2b09a0d06ac0c70 | from bgl import *
from mathutils import Vector
class Rectangle:
def __init__(self, x1 = 0, y1 = 0, x2 = 0, y2 = 0):
self.x1 = float(x1)
self.y1 = float(y1)
self.x2 = float(x2)
self.y2 = float(y2)
self.color = (0.8, 0.8, 0.8, 1.0)
self.border_color = (0.1, 0.1, 0.1, 1.0)
self.border_thickness = 0
@property
def width(self):
return abs(self.x1 - self.x2)
@property
def height(self):
return abs(self.y1 - self.y2)
@property
def left(self):
return min(self.x1, self.x2)
@property
def right(self):
return max(self.x1, self.x2)
@property
def top(self):
return max(self.y1, self.y2)
@property
def bottom(self):
return min(self.y1, self.y2)
@property
def center(self):
return Vector((self.center_x, self.center_y))
@property
def center_x(self):
return (self.x1 + self.x2) / 2
@property
def center_y(self):
return (self.y1 + self.y2) / 2
def contains(self, point):
return self.left <= point[0] <= self.right and self.bottom <= point[1] <= self.top
def draw(self):
glColor4f(*self.color)
glEnable(GL_BLEND)
glBegin(GL_POLYGON)
glVertex2f(self.x1, self.y1)
glVertex2f(self.x2, self.y1)
glVertex2f(self.x2, self.y2)
glVertex2f(self.x1, self.y2)
glEnd()
if self.border_thickness != 0:
self.drawBorder()
def drawBorder(self):
thickness = self.border_thickness
thickness = min(abs(self.x1 - self.x2) / 2, abs(self.y1 - self.y2) / 2, thickness)
left, right = sorted([self.x1, self.x2])
bottom, top = sorted([self.y1, self.y2])
if thickness > 0:
topBorder = Rectangle(left, top, right, top - thickness)
bottomBorder = Rectangle(left, bottom + thickness, right, bottom)
else:
topBorder = Rectangle(left + thickness, top, right - thickness, top - thickness)
bottomBorder = Rectangle(left + thickness, bottom + thickness, right - thickness, bottom)
leftBorder = Rectangle(left, top, left + thickness, bottom)
rightBorder = Rectangle(right - thickness, top, right, bottom)
for border in (topBorder, bottomBorder, leftBorder, rightBorder):
border.color = self.border_color
border.draw()
def __repr__(self):
return "({}, {}) - ({}, {})".format(self.x1, self.y1, self.x2, self.y2)
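# Quick usage sketch (illustrative; draw() needs an active OpenGL/bgl context):
#   r = Rectangle(0, 0, 100, 50)
#   r.contains((10, 10))    # -> True
#   r.center                # -> Vector((50.0, 25.0))
#   r.border_thickness = 2
#   r.draw()                # draws the fill, then the four border rectangles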
|
py | b405fd096500c955b6d77accf50311e636f281d4 | #!/usr/bin/env python
import setuptools
long_desc = open('README.rst').read()
setuptools.setup(
name="warmup4ie",
version='0.1.3',
description='client library for 4IE thermostat sold by warmup',
long_description=long_desc,
license='Apache',
author='Alexander Hinz',
author_email='[email protected]',
url='https://github.com/alex-0103/warmup4IE',
packages=setuptools.find_packages(),
classifiers=[
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Home Automation',
'Topic :: Software Development :: Libraries',
],
)
|
py | b405fdbb80e0aaaa141ec9b25a48bcd7349ae6dd | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './ui/src/MainForm.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtWidgets
class Ui_MainForm(object):
def setupUi(self, MainForm):
MainForm.setObjectName("MainForm")
MainForm.resize(600, 400)
self.horizontalLayout = QtWidgets.QHBoxLayout(MainForm)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget = QtWidgets.QWidget(MainForm)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(250, 0))
self.widget.setMaximumSize(QtCore.QSize(250, 16777215))
self.widget.setObjectName("widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.building_params_group_box = QtWidgets.QGroupBox(self.widget)
self.building_params_group_box.setObjectName("building_params_group_box")
self.formLayout_2 = QtWidgets.QFormLayout(self.building_params_group_box)
self.formLayout_2.setLabelAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.formLayout_2.setObjectName("formLayout_2")
self.label = QtWidgets.QLabel(self.building_params_group_box)
self.label.setObjectName("label")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.evolution_step_spin = QtWidgets.QSpinBox(self.building_params_group_box)
self.evolution_step_spin.setMinimum(1)
self.evolution_step_spin.setMaximum(12)
self.evolution_step_spin.setObjectName("evolution_step_spin")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.evolution_step_spin)
self.label_3 = QtWidgets.QLabel(self.building_params_group_box)
self.label_3.setObjectName("label_3")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.init_x_spin = QtWidgets.QSpinBox(self.building_params_group_box)
self.init_x_spin.setMinimum(1)
self.init_x_spin.setMaximum(10000)
self.init_x_spin.setObjectName("init_x_spin")
self.horizontalLayout_3.addWidget(self.init_x_spin)
self.init_y_spin = QtWidgets.QSpinBox(self.building_params_group_box)
self.init_y_spin.setMinimum(1)
self.init_y_spin.setMaximum(10000)
self.init_y_spin.setObjectName("init_y_spin")
self.horizontalLayout_3.addWidget(self.init_y_spin)
self.formLayout_2.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_3)
self.label_4 = QtWidgets.QLabel(self.building_params_group_box)
self.label_4.setObjectName("label_4")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.step_length_spin = QtWidgets.QSpinBox(self.building_params_group_box)
self.step_length_spin.setMinimum(1)
self.step_length_spin.setMaximum(1000)
self.step_length_spin.setProperty("value", 10)
self.step_length_spin.setObjectName("step_length_spin")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.step_length_spin)
self.verticalLayout_2.addWidget(self.building_params_group_box)
self.definition_group_box = QtWidgets.QGroupBox(self.widget)
self.definition_group_box.setObjectName("definition_group_box")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.definition_group_box)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.formLayout_3 = QtWidgets.QFormLayout()
self.formLayout_3.setObjectName("formLayout_3")
self.label_5 = QtWidgets.QLabel(self.definition_group_box)
self.label_5.setObjectName("label_5")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.file_name_edit = QtWidgets.QLineEdit(self.definition_group_box)
self.file_name_edit.setReadOnly(True)
self.file_name_edit.setObjectName("file_name_edit")
self.horizontalLayout_2.addWidget(self.file_name_edit)
self.open_file_button = QtWidgets.QToolButton(self.definition_group_box)
self.open_file_button.setObjectName("open_file_button")
self.horizontalLayout_2.addWidget(self.open_file_button)
self.formLayout_3.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_2)
self.verticalLayout_3.addLayout(self.formLayout_3)
self.definition_text = QtWidgets.QPlainTextEdit(self.definition_group_box)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.definition_text.sizePolicy().hasHeightForWidth())
self.definition_text.setSizePolicy(sizePolicy)
self.definition_text.setDocumentTitle("")
self.definition_text.setReadOnly(True)
self.definition_text.setObjectName("definition_text")
self.verticalLayout_3.addWidget(self.definition_text)
self.verticalLayout_3.setStretch(1, 1)
self.verticalLayout_2.addWidget(self.definition_group_box)
self.verticalLayout_2.setStretch(1, 1)
self.horizontalLayout.addWidget(self.widget)
self.paint_frame = LSystemPaintWidget(MainForm)
self.paint_frame.setAutoFillBackground(False)
self.paint_frame.setFrameShape(QtWidgets.QFrame.Box)
self.paint_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.paint_frame.setObjectName("paint_frame")
self.horizontalLayout.addWidget(self.paint_frame)
self.retranslateUi(MainForm)
QtCore.QMetaObject.connectSlotsByName(MainForm)
MainForm.setTabOrder(self.evolution_step_spin, self.init_x_spin)
MainForm.setTabOrder(self.init_x_spin, self.init_y_spin)
MainForm.setTabOrder(self.init_y_spin, self.step_length_spin)
MainForm.setTabOrder(self.step_length_spin, self.open_file_button)
MainForm.setTabOrder(self.open_file_button, self.file_name_edit)
MainForm.setTabOrder(self.file_name_edit, self.definition_text)
def retranslateUi(self, MainForm):
_translate = QtCore.QCoreApplication.translate
MainForm.setWindowTitle(_translate("MainForm", "L-системы"))
self.building_params_group_box.setTitle(_translate("MainForm", "Параметры построения"))
self.label.setText(_translate("MainForm", "Этап эволюции:"))
self.label_3.setText(_translate("MainForm", "Начальная точка (X ; Y):"))
self.label_4.setText(_translate("MainForm", "Длина шага построения:"))
self.definition_group_box.setTitle(_translate("MainForm", "Определение L-системы"))
self.label_5.setText(_translate("MainForm", "Файл определения:"))
self.open_file_button.setText(_translate("MainForm", "..."))
from ui.widgets.lsystem_paint_widget import LSystemPaintWidget
|
py | b405fe2592678e2272fec686f2c71307ab7c398a | #!/usr/bin/env python
import os
__all__ = ["package_dir","py_modules"]
# repo/
# repo/py_modules/
# repo/py_modules/modname1.py
# repo/py_modules/modname2.py
# known-issues:
# 1) 'package_dir' used with 'packages' and 'py_modules' (merge required)
cwd = os.getcwd()
def listnames(path):
listdir = os.listdir(path)
for l in listdir:
if os.path.splitext(l)[1] != ".py":
continue
fullpath = os.path.join(path, l)
if not os.path.isfile(fullpath):
continue
yield l.replace(".py", "")
path = os.path.join(cwd, "py_modules")
if os.path.exists(path) and os.path.isdir(path):
py_modules = list(listnames(path))
if py_modules:
package_dir = {'': "py_modules"}
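# Hedged usage sketch: a setup.py in the repository root could consume these
# exports roughly as follows (the helper module name below is hypothetical):
#   from setup_py_modules import package_dir, py_modules
#   from setuptools import setup
#   setup(name="example", package_dir=package_dir, py_modules=py_modules)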
|
py | b405ffcd40eeec2d037fa8815332d41658c2a2f1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Flask-Generic-Views documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 30 04:16:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import ast
import os
import re
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Flask-Generic-Views'
copyright = '2015, Daniel Knell'
author = 'Daniel Knell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('../flask_generic_views/__init__.py', 'rb') as f:
release = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
# The short X.Y version.
version=release.split('-')[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Generic-Viewsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-Generic-Views.tex', 'Flask-Generic-Views Documentation',
'Daniel Knell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-generic-views', 'Flask-Generic-Views Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-Generic-Views', 'Flask-Generic-Views Documentation',
author, 'Flask-Generic-Views', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/dev', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'flask': ('http://flask.pocoo.org/docs/', None),
'jinja': ('http://jinja.pocoo.org/docs/', None),
'sqlalchemy': ('http://www.sqlalchemy.org/docs/', None),
'wtforms': ('https://wtforms.readthedocs.org/en/latest/', None),
'flaskwtf': ('https://flask-wtf.readthedocs.org/en/latest/', None),
'flasksqlalchemy': ('http://flask-sqlalchemy.pocoo.org/', None)}
|
py | b406005aad9ecd0f1fde900c21942cf18bc4b208 | #!/usr/bin/env python
def temp_dir(id):
path = "/tmp/px4_sitl_files"
if id:
path += "/id_" + str(id)
return path
def udp_config(id):
px4_id = id-1
config = {}
config["gcs_url"] = "udp://@127.0.0.1:" + str(18570+px4_id)
config["simulator_tcp_port"] = 4560+px4_id
config["simulator_udp_port"] = 14560+px4_id
config["udp_offboard_port_local"] = 14580+px4_id
config["udp_offboard_port_remote"] = 14540+px4_id
config["udp_onboard_payload_port_local"] = 14280+px4_id
config["udp_onboard_payload_port_remote"] = 14030+px4_id
return config
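# Illustrative note (not part of the original utils): for the first vehicle (id=1)
# the mapping above yields gcs_url "udp://@127.0.0.1:18570", simulator_tcp_port 4560
# and udp_offboard_port_remote 14540; every additional id shifts each port by one.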
def check_unknown_args(unknown):
for arg in unknown:
if arg[0] == '-':
raise SyntaxWarning("Unexpected argument " + arg)
# def fcu_url(id, mode)
if __name__ == "__main__":
print "This is a utils collection, not a script!"
|
py | b406015a2aa58ef3414f570ecf86faa479f69226 | #!/usr/bin/env python
# coding: utf-8
# # Challenge 3
#
# In this challenge we will practice our knowledge of probability distributions. To do so,
# we will split this challenge into two parts:
#
# 1. The first part has 3 questions about an artificial *data set* containing a normal sample and
# a binomial one.
# 2. The second part is about analysing the distribution of one variable of the [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2) _data set_, with 2 questions.
#
# > Note: please do not change the names of the answer functions.
# ## General _setup_
# In[3]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
from statsmodels.distributions.empirical_distribution import ECDF
# In[4]:
'''%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()'''
# ## Part 1
# ### Part 1 _setup_
# In[5]:
np.random.seed(42)
dataframe = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000),
"binomial": sct.binom.rvs(100, 0.2, size=10000)})
# ## Start your analysis of part 1 from here
# In[6]:
# Your analysis of part 1 starts here.
dataframe.head()
# ## Question 1
#
# What is the difference between the quartiles (Q1, Q2 and Q3) of the `normal` and `binomial` variables of `dataframe`? Answer as a tuple of three elements rounded to three decimal places.
#
# In other words, let `q1_norm`, `q2_norm` and `q3_norm` be the quartiles of the `normal` variable and `q1_binom`, `q2_binom` and `q3_binom` the quartiles of the `binomial` variable; what is the difference `(q1_norm - q1_binom, q2_norm - q2_binom, q3_norm - q3_binom)`?
# In[7]:
def q1():
q1_norm, q2_norm, q3_norm = dataframe['normal'].quantile([0.25,0.5,0.75])
q1_binom, q2_binom, q3_binom = dataframe['binomial'].quantile([0.25,0.5,0.75])
return (round(q1_norm - q1_binom,3), round(q2_norm - q2_binom,3), round(q3_norm - q3_binom,3))
q1()
# Food for thought:
#
# * Did you expect values of this magnitude?
#
# * Can you explain how distributions that look so different (discrete vs. continuous, for example) end up producing these values?
# ## Question 2
#
# Consider the interval $[\bar{x} - s, \bar{x} + s]$, where $\bar{x}$ is the sample mean and $s$ is the standard deviation. What is the probability of this interval, computed with the empirical cumulative distribution function (empirical CDF) of the `normal` variable? Answer as a single scalar rounded to three decimal places.
# In[20]:
def q2():
dataframe.sort_values(by='normal', inplace=True)
media = dataframe['normal'].mean()
std = dataframe['normal'].std()
ecdf = ECDF(dataframe["normal"])
return float(round((ecdf(media + std) - ecdf(media - std)), 3))
q2()
# Food for thought:
#
# * Is this value close to the theoretical expectation?
# * Also try the intervals $[\bar{x} - 2s, \bar{x} + 2s]$ and $[\bar{x} - 3s, \bar{x} + 3s]$ (see the sketch right below).
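# In[ ]:
# Hedged sketch (not part of the original answers): repeating the empirical-CDF
# probability for the wider intervals suggested above, reusing `dataframe` from the setup.
ecdf_normal = ECDF(dataframe["normal"])
mean_n, std_n = dataframe["normal"].mean(), dataframe["normal"].std()
for k in (1, 2, 3):
    prob = ecdf_normal(mean_n + k * std_n) - ecdf_normal(mean_n - k * std_n)
    print("[mean - {k}s, mean + {k}s]: {p:.3f}".format(k=k, p=prob))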
# ## Question 3
#
# What is the difference between the means and the variances of the `binomial` and `normal` variables? Answer as a tuple of two elements rounded to three decimal places.
#
# In other words, let `m_binom` and `v_binom` be the mean and variance of the `binomial` variable, and `m_norm` and `v_norm` the mean and variance of the `normal` variable. What are the differences `(m_binom - m_norm, v_binom - v_norm)`?
# In[13]:
def q3():
m_binom = dataframe['binomial'].mean()
v_binom = dataframe['binomial'].var()
m_norm = dataframe['normal'].mean()
v_norm = dataframe['normal'].var()
return (round(m_binom - m_norm,3), round(v_binom - v_norm,3))
q3()
# Food for thought:
#
# * Did you expect values of this magnitude?
# * What is the effect of increasing or decreasing $n$ (currently 100) on the distribution of the `binomial` variable? (A quick numerical check follows below.)
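# In[ ]:
# Hedged sketch (not an official answer): the binomial mean is n*p and the variance is
# n*p*(1-p), so both grow linearly with n. A quick numerical check with scipy:
for n in (50, 100, 200):
    sample = sct.binom.rvs(n, 0.2, size=10000)
    print(n, round(float(sample.mean()), 2), round(float(sample.var()), 2))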
# ## Part 2
# ### Part 2 _setup_
# In[15]:
stars = pd.read_csv("pulsar_stars.csv")
stars.rename({old_name: new_name
for (old_name, new_name)
in zip(stars.columns,
["mean_profile", "sd_profile", "kurt_profile", "skew_profile", "mean_curve", "sd_curve", "kurt_curve", "skew_curve", "target"])
},
axis=1, inplace=True)
stars.loc[:, "target"] = stars.target.astype(bool)
# ## Start your analysis of part 2 from here
# In[16]:
# Your analysis of part 2 starts here.
stars.head()
# ## Question 4
#
# Considering the `mean_profile` variable of `stars`:
#
# 1. Keep only the values of `mean_profile` where `target == 0` (i.e. where the star is not a pulsar).
# 2. Standardize the filtered `mean_profile` variable so that it has mean 0 and variance 1.
#
# We will call the resulting variable `false_pulsar_mean_profile_standardized`.
#
# Find the theoretical quantiles of a normal distribution with mean 0 and variance 1 for 0.80, 0.90 and 0.95 using the `norm.ppf()` function available in `scipy.stats`.
#
# What are the probabilities associated with these quantiles according to the empirical CDF of the `false_pulsar_mean_profile_standardized` variable? Answer as a tuple of three elements rounded to three decimal places.
# In[36]:
# Filter values for questions 4 and 5
mean_profile = stars.loc[stars['target'] == False ,'mean_profile']
# Standardize
false_pulsar_mean_profile_standardized = (mean_profile - mean_profile.mean())/mean_profile.std()
# In[31]:
def q4():
    # Theoretical quantiles
quartis = sct.norm.ppf([0.80, 0.90, 0.95], loc=0, scale=1)
#Empirical CDF
ecdf = ECDF(false_pulsar_mean_profile_standardized)
return tuple(ecdf(quartis).round(3))
q4()
# Food for thought:
#
# * Do the values you found make sense?
# * What might this say about the distribution of the `false_pulsar_mean_profile_standardized` variable?
# ## Question 5
#
# What is the difference between the Q1, Q2 and Q3 quantiles of `false_pulsar_mean_profile_standardized` and the same theoretical quantiles of a normal distribution with mean 0 and variance 1? Answer as a tuple of three elements rounded to three decimal places.
# In[41]:
def q5():
    # Theoretical quantiles
    quartis = sct.norm.ppf([0.25, 0.50, 0.75], loc=0, scale=1)
    # Quantiles of false_pulsar_mean_profile_standardized
q1, q2, q3 = false_pulsar_mean_profile_standardized.quantile([0.25,0.5,0.75])
return (round(q1-quartis[0],3), round(q2-quartis[1],3), round(q3-quartis[2],3))
q5()
# Food for thought:
#
# * Do the values you found make sense?
# * What might this say about the distribution of the `false_pulsar_mean_profile_standardized` variable?
# * Fun fact: some hypothesis tests for normality of the data use this very same approach.
|
py | b40603212070a6a7c90825452f2c6bdb57a98dd3 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import ibis.compat as compat
from ibis.config import options
def guid():
try:
from ibis.comms import uuid4_hex
return uuid4_hex()
except ImportError:
from uuid import uuid4
guid = uuid4()
return guid.hex if compat.PY3 else guid.get_hex()
def bytes_to_uint8_array(val, width=70):
"""
Formats a byte string for use as a uint8_t* literal in C/C++
"""
if len(val) == 0:
return '{}'
lines = []
line = '{' + str(ord(val[0]))
for x in val[1:]:
token = str(ord(x))
if len(line) + len(token) > width:
lines.append(line + ',')
line = token
else:
line += ',%s' % token
lines.append(line)
return '\n'.join(lines) + '}'
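# Example (illustrative, not from the original module; assumes the Python 2 behaviour
# where indexing a byte string yields one-character strings for ord()):
#   bytes_to_uint8_array('abc')  ->  '{97,98,99}'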
def unique_by_key(values, key):
id_to_table = {}
for x in values:
id_to_table[key(x)] = x
return compat.dict_values(id_to_table)
def indent(text, spaces):
block = ' ' * spaces
return '\n'.join(block + x for x in text.split('\n'))
def any_of(values, t):
for x in values:
if isinstance(x, t):
return True
return False
def all_of(values, t):
for x in values:
if not isinstance(x, t):
return False
return True
def promote_list(val):
if not isinstance(val, list):
val = [val]
return val
class IbisSet(object):
def __init__(self, keys=None):
self.keys = keys or []
@classmethod
def from_list(cls, keys):
return IbisSet(keys)
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def add(self, obj):
self.keys.append(obj)
class IbisMap(object):
def __init__(self):
self.keys = []
self.values = []
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def set(self, key, value):
self.keys.append(key)
self.values.append(value)
def get(self, key):
for k, v in zip(self.keys, self.values):
if key.equals(k):
return v
raise KeyError(key)
def is_function(v):
return isinstance(v, (types.FunctionType, types.LambdaType))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
    Brought over from pandas
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
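# Example (illustrative, not from the original module):
#   adjoin(2, ['a', 'bb'], ['ccc', 'd'])
# pads every column but the last to its widest entry plus two spaces, producing
#   'a   ccc\nbb  d  '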
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.unicode_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def deprecate(f, message):
def g(*args, **kwargs):
print(message)
return f(*args, **kwargs)
return g
def to_stdout(x):
print(x)
def log(msg):
if options.verbose:
(options.verbose_log or to_stdout)(msg)
class cache_readonly(object):
def __init__(self, func=None, allow_setting=False):
if func is not None:
self.func = func
self.name = func.__name__
self.allow_setting = allow_setting
def __call__(self, func, doc=None):
self.func = func
self.name = func.__name__
return self
def __get__(self, obj, typ):
# Get the cache or set a default one if needed
cache = getattr(obj, '_cache', None)
if cache is None:
try:
cache = obj._cache = {}
except (AttributeError):
return
if self.name in cache:
val = cache[self.name]
else:
val = self.func(obj)
cache[self.name] = val
return val
def __set__(self, obj, value):
if not self.allow_setting:
raise Exception("cannot set values for [%s]" % self.name)
# Get the cache or set a default one if needed
cache = getattr(obj, '_cache', None)
if cache is None:
try:
cache = obj._cache = {}
except (AttributeError):
return
cache[self.name] = value
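# Usage sketch (illustrative, not from the original module): `cache_readonly` behaves like
# a property whose value is computed once per instance and then reused from `self._cache`:
#
#   class Table(object):
#       @cache_readonly
#       def schema(self):
#           return compute_schema(self)   # hypothetical expensive call
#
#   t.schema  # first access computes and caches, later accesses hit the cache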
def approx_equal(a, b, eps):
assert abs(a - b) < eps
def implements(f):
def decorator(g):
g.__doc__ = f.__doc__
return g
return decorator
|
py | b40604374e89bfc18aba02788018704e9ea6a492 | ##
# File: PharosTargetCofactorProviderTests.py
# Author: J. Westbrook
# Date: 15-Jun-2021
# Version: 0.001
#
# Update:
#
#
##
"""
Tests for utilities managing Pharos target cofactor data.
"""
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "[email protected]"
__license__ = "Apache 2.0"
import logging
import os
import platform
import resource
import time
import unittest
from rcsb.utils.targets.PharosTargetCofactorProvider import PharosTargetCofactorProvider
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(HERE))
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
class PharosTargetCofactorProviderTests(unittest.TestCase):
def setUp(self):
self.__cachePath = os.path.join(HERE, "test-output", "CACHE")
#
self.__seqMatchResultsPath = os.path.join(HERE, "test-data", "pharos-vs-pdbprent-filtered-results.json.gz")
self.__startTime = time.time()
logger.info("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
unitS = "MB" if platform.system() == "Darwin" else "GB"
rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
endTime = time.time()
logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
def testBuildPharosTargetsCofactors(self):
stfP = PharosTargetCofactorProvider(cachePath=self.__cachePath, useCache=False)
ok = stfP.testCache()
self.assertFalse(ok)
ok = stfP.buildCofactorList(self.__seqMatchResultsPath)
self.assertTrue(ok)
stfP = PharosTargetCofactorProvider(cachePath=self.__cachePath, useCache=True)
ok = stfP.testCache()
self.assertTrue(ok)
ok = stfP.hasTarget("5fn7_1")
self.assertTrue(ok)
aL = stfP.getTargets("5fn7_1")
self.assertGreaterEqual(len(aL[0]["cofactors"]), 5)
def buildPharosTargetCofactors():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(PharosTargetCofactorProviderTests("testBuildPharosTargetsCofactors"))
return suiteSelect
if __name__ == "__main__":
mySuite = buildPharosTargetCofactors()
unittest.TextTestRunner(verbosity=2).run(mySuite)
|
py | b406046e85bf22701fb8f82d6c6b80e0c8dde06e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMarketingDataRetailDmQueryModel(object):
def __init__(self):
self._content_id = None
self._shop_ids = None
@property
def content_id(self):
return self._content_id
@content_id.setter
def content_id(self, value):
self._content_id = value
@property
def shop_ids(self):
return self._shop_ids
@shop_ids.setter
def shop_ids(self, value):
if isinstance(value, list):
self._shop_ids = list()
for i in value:
self._shop_ids.append(i)
def to_alipay_dict(self):
params = dict()
if self.content_id:
if hasattr(self.content_id, 'to_alipay_dict'):
params['content_id'] = self.content_id.to_alipay_dict()
else:
params['content_id'] = self.content_id
if self.shop_ids:
if isinstance(self.shop_ids, list):
for i in range(0, len(self.shop_ids)):
element = self.shop_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.shop_ids[i] = element.to_alipay_dict()
if hasattr(self.shop_ids, 'to_alipay_dict'):
params['shop_ids'] = self.shop_ids.to_alipay_dict()
else:
params['shop_ids'] = self.shop_ids
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMarketingDataRetailDmQueryModel()
if 'content_id' in d:
o.content_id = d['content_id']
if 'shop_ids' in d:
o.shop_ids = d['shop_ids']
return o
|
py | b40604b4f4a16ef17f7c57df99f75f9fd6cff2d6 | from datetime import time
import numpy as np
import pytest
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestBetweenTime:
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH#20725
df = DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
|
py | b406050c9ce7ebfc6e38409e389b9a17e58c79e1 | import datetime
import re
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class ChiSsa16Spider(CityScrapersSpider):
name = "chi_ssa_16"
agency = "Chicago Special Service Area #16"
timezone = "America/Chicago"
start_urls = ["https://greektownchicago.org/about/ssa-16/"]
def parse(self, response):
"""
Meeting entries are contained in a list item without any specific class or id
designation. Because of this, I filtered the present list items on the page with
the ", 20" string (every meeting li has this in the present text from the year
listed), which indicates the meeting occurred or is scheduled to occur and
therefore contains relevant information to document.
"""
for item in response.xpath('//li[contains(text(), ", 20")]'):
meeting = Meeting(
title=self._parse_title(item),
description=self._parse_description(item),
classification=self._parse_classification(item),
start=self._parse_start(item),
end=self._parse_end(item),
all_day=self._parse_all_day(item),
time_notes=self._parse_time_notes(item),
location=self._parse_location(item),
links=self._parse_links(item),
source=self._parse_source(response),
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_title(self, item):
# There was no variation in types of meetings, so this was applicable for all.
return "Tax Commission"
def _parse_description(self, item):
return ""
def _parse_classification(self, item):
return COMMISSION
def _parse_start(self, item):
# Extracted all alphanumeric characters from the text of each meeting,
# which contained the dates of the meeting, then split into list containing M,
# D, Y
date = re.sub(r"[^a-zA-Z0-9]+", " ", item.xpath("text()").get()).split()
# Extracted just alphabetical characters from month, then used built-in strptime
# function in datetime to parse numeric month from string
month = re.sub(r"[^a-zA-Z]+", " ", date[0])
month = datetime.datetime.strptime(month, "%B").month
# Extracted numbers from day and year entries of date list to remove any errant
# characters (i.e. one entry said January 28th where all others read January 28)
day = int(re.sub(r"[^0-9]+", " ", date[1]))
year = int(re.sub(r"[^0-9]+", " ", date[2]))
# Extracts accompanying meeting information to extract meeting time
time = item.xpath(
'ancestor::div[@class="gdc_row"]'
'/descendant::p[contains(text(), "60661")]/text()'
).get()
time = re.sub(r"[^a-zA-Z0-9:]+", "", time)
time = re.findall(r"\d{1,2}:\d{2}(?:AM|PM|am|pm)", time)[0].upper()
hour = datetime.datetime.strptime(time, "%I:%M%p").hour
minute = datetime.datetime.strptime(time, "%I:%M%p").minute
return datetime.datetime(year, month, day, hour, minute)
def _parse_end(self, item):
"""
Meeting adjournment information contained in files present in Minutes documents
attached to each meeting that are un-optimized scanned docs.
"""
return None
def _parse_time_notes(self, item):
# No other details present
return ""
def _parse_all_day(self, item):
return False
def _parse_location(self, item):
# Meetings occurred at same location for all years documented
return {
"address": "306 S. Halsted St, 2nd Floor, Chicago, IL 60661",
"name": "SSA #16 Office",
}
def _parse_links(self, item):
"""
Most entries contained a Minutes document, but some not yet posted or otherwise
have not occurred. When no anchor tag found, returns empty JSON element.
"""
if item.xpath("a/@href").get() is None:
return []
return [
{"href": item.xpath("a/@href").get(), "title": item.xpath("a/text()").get()}
]
def _parse_source(self, response):
return response.url
|
py | b4060845588ddb11c143707e65ebca61c336f9b8 | import os
import shutil
from mojo.compile import executeCommand
woff2_compress = os.path.join(os.path.dirname(__file__), "woff2_compress")
os.chmod(woff2_compress, 0o0777)
def generateWOFF2(source, dest):
cmds = [woff2_compress, source]
result = executeCommand(cmds)
resultWoff = os.path.splitext(source)[0] + ".woff2"
shutil.move(resultWoff, dest)
return result
def WOFF2Builder(sourcePath, destinationPath):
fileName, ext = os.path.splitext(sourcePath)
if ext.lower() not in [".ttf", ".otf"]:
return
result = generateWOFF2(sourcePath, destinationPath)
return result
def WOFF2BuilderFromFolder(sourceDir, destinationDir):
for fileName in os.listdir(sourceDir):
name, ext = os.path.splitext(fileName)
path = os.path.join(sourceDir, fileName)
destinationPath = os.path.join(destinationDir, name+".woff2")
WOFF2Builder(path, destinationPath)
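if __name__ == "__main__":
    # Illustrative usage only; the folder names below are hypothetical and not part of
    # the original module. Converts every .ttf/.otf in one folder to .woff2 in another.
    WOFF2BuilderFromFolder("fonts/source", "fonts/woff2")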
|
py | b40608ce06f88257c0e5a5f1c46c24124b848f19 | #!/usr/bin/env python3
#
# Script to convert Gaussian output to ASE-extxyz trajectory
# by Patrick Melix
# 2018/03/13
#
# You can import the module and then call .main() or use it as a script
from ase import io
import os
def main(inFile,outFile):
if not os.path.isfile(inFile):
raise ValueError('File {:} does not exist'.format(str(inFile)))
#if output exists mv to .bak
if os.path.isfile(outFile):
print('ATTENTION: {:} exists, moving to *.bak'.format(outFile))
os.rename(outFile, outFile+'.bak')
mol = io.read(inFile, format='gaussian-out', quantity='structures')
for frame in mol:
frame.write(outFile,append=True)
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Convert Gaussian output to ASE-extxyz trajectory')
parser.add_argument('input', type=str, help='input xyz file')
parser.add_argument('output', type=str, help='output file')
args = parser.parse_args()
main(args.input,args.output)
|
py | b40609703f582f3e2eb3a05ec33847a3905d70f0 | from django.shortcuts import render, redirect, reverse
from django.contrib.auth import login, logout
from .forms import LoginForm, ProfileForm, EmailForm
from .models import ManagerUser
from shipmanage.models import Ship, Order, Berth
# Create your views here.
def user_login(request):
if request.user.is_authenticated:
return redirect('/')
else:
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
user = login_form.cleaned_data['user']
login(request, user)
return redirect('/')
else:
login_form = LoginForm()
context = {'login_form':login_form, }
return render(request, "login.html", context)
def home_page(request):
if request.user.is_authenticated:
ship = Ship.objects.all()
order = Order.objects.all()
berth = Berth.objects.all()
context = {'ship_count':ship.count, 'order_time':order, 'berth_count':berth.count}
return render(request, "index.html", context)
else:
return redirect('login')
def user_logout(request):
logout(request)
return redirect(request.GET.get('from', reverse('login')))
def forget_password(request):
return render(request, "forgot.html")
def user_profile(request):
if not request.user.is_authenticated:
return redirect('login')
else:
if request.method == 'POST':
Profile_form = ProfileForm(request.POST, user=request.user)
if Profile_form.is_valid():
user = request.user
new_pass = Profile_form.cleaned_data['new_password']
user.set_password(new_pass)
user.save()
logout(request)
return redirect('login')
else:
Profile_form = ProfileForm()
context = {'Profile_form': Profile_form, }
return render(request, "profiles.html", context)
def change_email(request):
if not request.user.is_authenticated:
return redirect('login')
else:
if request.method == 'POST':
Email_Form = EmailForm(request.POST, user=request.user)
if Email_Form.is_valid():
user = request.user
new_email = Email_Form.cleaned_data['email']
                user.email = new_email
user.save()
else:
Email_Form = EmailForm()
context = {'Email_Form': Email_Form, }
return render(request,"change_email.html", context) |
py | b4060a21d5c460500cec9ae8003579e9f460ada6 | from PIL import Image
from . import _files
def draw_quadmesh(data, obj):
"""Returns the PGFPlots code for an graphics environment holding a
rendering of the object.
"""
content = []
# Generate file name for current object
filepath, rel_filepath = _files.new_filepath(data, "img", ".png")
# Get the dpi for rendering and store the original dpi of the figure
dpi = data["dpi"]
fig_dpi = obj.figure.get_dpi()
obj.figure.set_dpi(dpi)
# Render the object and save as png file
from matplotlib.backends.backend_agg import RendererAgg
cbox = obj.get_clip_box()
width = int(round(cbox.extents[2]))
height = int(round(cbox.extents[3]))
ren = RendererAgg(width, height, dpi)
obj.draw(ren)
# Generate a image from the render buffer
image = Image.frombuffer(
"RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1
)
    # Crop the image to the actual content (removing the regions otherwise
# used for axes, etc.)
# 'image.crop' expects the crop box to specify the left, upper, right, and
# lower pixel. 'cbox.extents' gives the left, lower, right, and upper
# pixel.
box = (
int(round(cbox.extents[0])),
0,
int(round(cbox.extents[2])),
int(round(cbox.extents[3] - cbox.extents[1])),
)
cropped = image.crop(box)
cropped.save(filepath)
# Restore the original dpi of the figure
obj.figure.set_dpi(fig_dpi)
# write the corresponding information to the TikZ file
extent = obj.axes.get_xlim() + obj.axes.get_ylim()
    # Explicitly use \pgfimage as includegraphics command, as the default
# \includegraphics fails unexpectedly in some cases
ff = data["float format"]
posix_filepath = rel_filepath.as_posix()
content.append(
"\\addplot graphics [includegraphics cmd=\\pgfimage,"
f"xmin={extent[0]:{ff}}, xmax={extent[1]:{ff}}, "
f"ymin={extent[2]:{ff}}, ymax={extent[3]:{ff}}] {{{posix_filepath}}};\n"
)
return data, content
|
py | b4060c25d893e50f6650dfdb712b29aea65bbe3c | #!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from ambari_agent.ActionQueue import ActionQueue
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_agent.FileUtil import getFilePath
from ambari_agent.UpgradeExecutor import UpgradeExecutor
from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
import os, errno, time, pprint, tempfile, threading
import TestStackVersionsFileHandler
from mock.mock import patch, MagicMock, call
class TestActionQueue(TestCase):
def test_ActionQueueStartStop(self):
actionQueue = ActionQueue(AmbariConfig().getConfig())
actionQueue.IDLE_SLEEP_TIME = 0.01
actionQueue.start()
actionQueue.stop()
actionQueue.join()
self.assertEqual(actionQueue.stopped(), True, 'Action queue is not stopped.')
#This feature is not yet implemented in ActionQueue
def test_RetryAction(self):
pass
def test_command_in_progress(self):
config = AmbariConfig().getConfig()
tmpfile = tempfile.gettempdir()
config.set('agent', 'prefix', tmpfile)
actionQueue = ActionQueue(config)
actionQueue.IDLE_SLEEP_TIME = 0.01
executor_started_event = threading.Event()
end_executor_event = threading.Event()
actionQueue.puppetExecutor = FakeExecutor(executor_started_event, end_executor_event)
before_start_result = actionQueue.result()
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'status' : 'IN_PROGRESS',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
'roleCommand': "roleCommand",
'configurations': "configurations",
'commandType': "EXECUTION_COMMAND",
'configurations':{'global' : {}}
}
actionQueue.put(command)
actionQueue.start()
executor_started_event.wait()
#print ("ii: " + pprint.pformat(actionQueue.commandInProgress))
in_progress_result = actionQueue.result()
end_executor_event.set()
actionQueue.stop()
actionQueue.join()
after_start_result = actionQueue.result()
self.assertEquals(len(before_start_result['componentStatus']), 0)
self.assertEquals(len(before_start_result['reports']), 0)
self.assertEquals(len(in_progress_result['componentStatus']), 0)
self.assertEquals(len(in_progress_result['reports']), 1)
self.assertEquals(in_progress_result['reports'][0]['status'], "IN_PROGRESS")
self.assertEquals(in_progress_result['reports'][0]['stdout'], "Dummy output")
self.assertEquals(in_progress_result['reports'][0]['exitCode'], 777)
self.assertEquals(in_progress_result['reports'][0]['stderr'], 'Dummy err')
self.assertEquals(len(after_start_result['componentStatus']), 0)
self.assertEquals(len(after_start_result['reports']), 1)
self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
#print("tmpout: " + pprint.pformat(actionQueue.tmpdir))
#print("before: " + pprint.pformat(before_start_result))
#print("in_progress: " + pprint.pformat(in_progress_result))
#print("after: " + pprint.pformat(after_start_result))
def test_configtags(self):
config = AmbariConfig().getConfig()
tmpfile = tempfile.gettempdir()
config.set('agent', 'prefix', tmpfile)
actionQueue = ActionQueue(config)
actionQueue.IDLE_SLEEP_TIME = 0.01
executor_started_event = threading.Event()
end_executor_event = threading.Event()
actionQueue.puppetExecutor = FakeExecutor(executor_started_event, end_executor_event)
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'status' : 'IN_PROGRESS',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
'roleCommand': "roleCommand",
'configurations': "configurations",
'commandType': "EXECUTION_COMMAND",
'configurations':{'global' : {}},
'configurationTags':{'global' : { 'tag': 'v1' }}
}
actionQueue.put(command)
actionQueue.start()
executor_started_event.wait()
end_executor_event.set()
actionQueue.stop()
actionQueue.join()
after_start_result = actionQueue.result()
configname = os.path.join(tmpfile, 'config.json')
self.assertEquals(len(after_start_result['componentStatus']), 0)
self.assertEquals(len(after_start_result['reports']), 1)
self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
self.assertEquals(len(after_start_result['reports'][0]['configurationTags']), 1)
self.assertEquals(True, os.path.isfile(configname))
os.remove(configname)
@patch.object(ActionQueue, "executeCommand")
@patch.object(ActionQueue, "stopped")
def test_upgradeCommand_dispatching(self, stopped_method, executeCommand_method):
queue = ActionQueue(config = MagicMock())
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'roleCommand' : 'UPGRADE',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
'configurations': "configurations",
'commandType': "EXECUTION_COMMAND",
'configurations':{'global' : {}},
'roleParams': {},
'commandParams' : {
'source_stack_version' : 'HDP-1.2.1',
'target_stack_version' : 'HDP-1.3.0'
}
}
result = [{
'exitcode' : 0,
'stdout' : 'abc',
'stderr' : 'def'
}]
executeCommand_method.return_value = result
stopped_method.side_effect = [False, False, True, True, True]
queue.stopped = stopped_method
queue.IDLE_SLEEP_TIME = 0.001
queue.put(command)
queue.run()
self.assertTrue(executeCommand_method.called)
self.assertEquals(queue.resultQueue.qsize(), 1)
returned_result = queue.resultQueue.get()
self.assertTrue(returned_result[1] is result[0])
@patch.object(UpgradeExecutor, "perform_stack_upgrade")
def test_upgradeCommand_executeCommand(self, perform_stack_upgrade_method):
queue = ActionQueue(config = MagicMock())
command = {
'commandId': 17,
'role' : "role",
'taskId' : "taskId",
'clusterName' : "clusterName",
'serviceName' : "serviceName",
'roleCommand' : 'UPGRADE',
'hostname' : "localhost.localdomain",
'hostLevelParams': "hostLevelParams",
'clusterHostInfo': "clusterHostInfo",
'configurations': "configurations",
'commandType': "EXECUTION_COMMAND",
'configurations':{'global' : {}},
'roleParams': {},
'commandParams' : {
'source_stack_version' : 'HDP-1.2.1',
'target_stack_version' : 'HDP-1.3.0'
}
}
perform_stack_upgrade_method.return_value = {
'exitcode' : 0,
'stdout' : 'abc',
'stderr' : 'def'
}
result = queue.executeCommand(command)
expected_result = [{'actionId': 17,
'clusterName': 'clusterName',
'exitCode': 0,
'role': 'role',
'serviceName': 'serviceName',
'status': 'COMPLETED',
'stderr': 'def',
'stdout': 'abc',
'taskId': 'taskId'}]
self.assertEquals(result, expected_result)
@patch.object(StackVersionsFileHandler, "read_stack_version")
@patch.object(ActionQueue, "stopped")
def test_status_command_without_globals_section(self, stopped_method,
read_stack_version_method):
config = AmbariConfig().getConfig()
config.set('agent', 'prefix', TestStackVersionsFileHandler.dummyVersionsFile)
queue = ActionQueue(config)
statusCommand = {
"serviceName" : 'HDFS',
"commandType" : "STATUS_COMMAND",
"clusterName" : "",
"componentName" : "DATANODE",
'configurations':{}
}
queue.stopped = stopped_method
stopped_method.side_effect = [False, False, True, True, True]
read_stack_version_method.return_value="1.3.0"
queue.IDLE_SLEEP_TIME = 0.001
queue.put(statusCommand)
queue.run()
returned_result = queue.resultQueue.get()
returned_result[1]['status'] = 'INSTALLED' # Patch live value
self.assertEquals(returned_result, ('STATUS_COMMAND',
{'clusterName': '',
'componentName': 'DATANODE',
'msg': '',
'serviceName': 'HDFS',
'stackVersion': '1.3.0',
'status': 'INSTALLED'}))
class FakeExecutor():
def __init__(self, executor_started_event, end_executor_event):
self.executor_started_event = executor_started_event
self.end_executor_event = end_executor_event
pass
def runCommand(self, command, tmpoutpath, tmperrpath):
tmpout= open(tmpoutpath, 'w')
tmpout.write("Dummy output")
tmpout.flush()
tmperr= open(tmperrpath, 'w')
tmperr.write("Dummy err")
tmperr.flush()
self.executor_started_event.set()
self.end_executor_event.wait()
return {
"exitcode": 0,
"stdout": "returned stdout",
"stderr": "returned stderr"
}
|
py | b4060dc08eb6c2081468795955c97ec221674ff1 | numbers = []  # create an empty list
values = input("enter the numbers separated by commas").split(",")  # take the input
for m in values:
    numbers.append(int(m))  # add each input value to the list
print(numbers)
print(tuple(numbers))
|
py | b4060e4d1b1139635bdf9359b5aea9abf7ae29ea | # -*- coding: utf-8 -*-
__author__ = "苦叶子"
__modifier__ = "[email protected]"
"""
"""
from flask import current_app, session, url_for
from flask_restful import Resource, reqparse
import json
import os
import codecs
import threading
from dateutil import tz
from robot.api import ExecutionResult # done
from utils.file import exists_path
from utils.run import remove_robot
from ..app import scheduler
from utils.schedule import add_schedulejob
from utils.mylogger import getlogger
class TaskList(Resource):
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument('method', type=str)
self.parser.add_argument('user', type=str)
self.parser.add_argument('project', type=str)
self.parser.add_argument('task_name', type=str)
self.parser.add_argument('schedule_type', type=str)
self.parser.add_argument('year', type=str)
self.parser.add_argument('mon', type=str)
self.parser.add_argument('day', type=str)
self.parser.add_argument('hour', type=str)
self.parser.add_argument('min', type=str)
self.parser.add_argument('sec', type=str)
self.parser.add_argument('week', type=str)
self.parser.add_argument('day_of_week', type=str)
self.parser.add_argument('start_date', type=str)
self.parser.add_argument('end_date', type=str)
self.log = getlogger(__name__)
self.app = current_app._get_current_object()
def get(self):
args = self.parser.parse_args()
if args['method'] == 'get_tasklist':
project = args["project"]
return get_task_list(self.app, session['username'], project)
if args['method'] == 'get_schedulejoblist':
return get_schedulejob_list(self.app, args)
def post(self):
args = self.parser.parse_args()
if args["method"] == "get_projecttask":
return get_projecttask(self.app)
elif args["method"] == "pause":
(user,project,task_name) = (args['user'],args['project'],args['task_name'])
job_id = "{}#{}#{}".format(user, project, task_name)
if not user == session["username"]:
self.app.config['DB'].insert_loginfo(session["username"],'schedulejob','pause',job_id,'auth fail')
return {"status": "fail", "msg": "失败:不允许冻结其它人的任务!"}
lock = threading.Lock()
lock.acquire()
try:
scheduler.pause_job(job_id)
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败:{}".format(e)}
lock.release()
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'pause', job_id, 'success')
return {"status": "success", "msg": "成功:冻结任务:{}".format(job_id)}
elif args["method"] == "resume":
(user,project,task_name) = (args['user'],args['project'],args['task_name'])
job_id = "{}#{}#{}".format(user, project, task_name)
if not user == session["username"]:
self.app.config['DB'].insert_loginfo(session["username"],'schedulejob','resume',job_id,'auth fail')
return {"status": "fail", "msg": "失败:不允许恢复其它人的任务!"}
lock = threading.Lock()
lock.acquire()
try:
scheduler.resume_job(job_id)
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败: {}".format(e)}
lock.release()
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'resume', job_id, 'success')
return {"status": "success", "msg": "成功:恢复任务:{}".format(job_id)}
elif args["method"] == "remove_schedulejob":
(user,project,task_name) = (args['user'],args['project'],args['task_name'])
job_id = "{}#{}#{}".format(user, project, task_name)
if not user == session["username"]:
self.app.config['DB'].insert_loginfo(session["username"],'schedulejob','remove_schedulejob',job_id,'auth fail')
return {"status": "fail", "msg": "失败:不允许操作其它人的任务!"}
lock = threading.Lock()
lock.acquire()
try:
scheduler.remove_job(job_id)
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败: {}".format(e)}
lock.release()
res = self.app.config['DB'].runsql("DELETE from schedule_job where user='{}' and project='{}' and task_name='{}';".format(user,project,task_name))
if res:
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'remove_schedulejob', job_id, 'success')
else:
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'remove_schedulejob', job_id, 'DB Fail')
return {"status": "fail", "msg": "失败:数据库操作失败:{}".format(job_id)}
return {"status": "success", "msg": "成功:删除任务成功:{}".format(job_id)}
elif args["method"] == "delete_allschedulejobs":
if not session['username'] == 'Admin':
return {"status": "fail", "msg": "失败:只有Admin可以进行此操作"}
lock = threading.Lock()
lock.acquire()
try:
scheduler.remove_all_jobs()
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败: {}".format(e)}
lock.release()
res = self.app.config['DB'].runsql("DELETE from schedule_job;")
if res:
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'delete_allschedulejobs', 'all', 'success')
else:
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'delete_allschedulejobs', "all", 'DB Fail')
return {"status": "fail", "msg": "失败:数据库操作失败:{}".format('all')}
return {"status": "success", "msg": "成功:删除任务成功:{}".format('all')}
elif args["method"] == "add_job2schedule":
(user,project,task_name) = (args['user'],args['project'],args['task_name'])
job_id = "{}#{}#{}".format(user, project, task_name)
if not user == session["username"]:
self.app.config['DB'].insert_loginfo(session["username"],'schedulejob','add_job2schedule',job_id,'auth fail')
return {"status": "fail", "msg": "失败:不允许操作其它人的任务!"}
job = scheduler.get_job(job_id)
if job:
job.resume()
return {"status": "success", "msg": "任务已存在,开始调度!"}
res = self.app.config['DB'].runsql("SELECT * from schedule_job where user='{}' and project='{}' and task_name='{}' limit 1;".format(user,project,task_name))
if not res:
return {"status": "fail", "msg": "失败:找不到任务!"}
else:
(user,project,task_no,task_name,method,schedule_type,year,mon,day,hour,min,sec,week,
day_of_week,start_date,end_date,sponsor) = res.fetchone()
myargs = {'user': user,
'project': project,
'task_no': task_no,
'task_name': task_name,
'method': method,
'schedule_type': schedule_type,
'year': year,
'mon': mon,
'day': day,
'hour': hour,
'min': min,
'sec': sec,
'week': week,
'day_of_week': day_of_week,
'start_date': start_date,
'end_date': end_date,
'sponsor': sponsor
}
return add_schedulejob(self.app, scheduler, myargs)
elif args["method"] == "edit_schedulejob":
self.log.info("edit_schedulejob, args:{}".format(args))
splits = args["task_name"].split('_#') # user#project#task_name
if len(splits) != 3:
return {"status": "fail", "msg": "失败:任务名称的格式错误:{}".format(args["task_name"])}
(user, project, task_name) = splits
job_id = "{}#{}#{}".format(user, project, task_name)
if not user == session["username"]:
self.app.config['DB'].insert_loginfo(session["username"],'schedulejob','edit_schedulejob',job_id,'auth fail')
return {"status": "fail", "msg": "失败:不允许操作其它人的任务!"}
lock = threading.Lock()
lock.acquire()
try:
job = scheduler.get_job(job_id)
scheduler.remove_job(job_id) if job else None
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败:清理调度任务失败 {}".format(e)}
lock.release()
res = self.app.config['DB'].runsql(''' UPDATE schedule_job set
schedule_type='{}',
year='{}',
mon='{}',
day='{}',
hour='{}',
min='{}',
sec='{}',
week='{}',
day_of_week='{}',
start_date='{}',
end_date='{}' WHERE user='{}' and project='{}' and task_name='{}' ;
'''.format(args['schedule_type'], args['year'], args['mon'], args['day'],
args['hour'], args['min'],
args['sec'], args['week'], args['day_of_week'], args['start_date'],
args['end_date'],
user, project, task_name))
if res:
return {"status": "success", "msg": "成功:修改调度信息成功,可以加入调度任务"}
else:
return {"status": "fail", "msg": "失败:数据操作失败"}
elif args["method"] == "add_schedulejob":
user = session["username"]
self.log.info("add_schedulejob, args:{}".format(args))
splits = args["task_name"].split('_#') # Project_#03Variables_#36
if len(splits) != 3:
return {"status": "fail", "msg": "失败:任务名称的格式错误:{}".format(args["task_name"])}
(project, task_name, task_no) = splits
myargs = {'user': user,
'project':project,
'task_no':task_no,
'task_name': task_name,
'method': args['method'],
'schedule_type': args['schedule_type'],
'year': args['year'],
'mon': args['mon'],
'day': args['day'],
'hour': args['hour'],
'min': args['min'],
'sec': args['sec'],
'week': args['week'],
'day_of_week': args['day_of_week'],
'start_date': args['start_date'],
'end_date': args['end_date'],
'sponsor': 'user'
}
if self.app.config['DB'].add_chedulejob(myargs):
return add_schedulejob(self.app, scheduler, myargs)
else:
return {"status": "fail", "msg": "失败:添加调度任务失败,插入数据库失败。"}
elif args["method"] == "pause_scheduler":
if not session['username'] == 'Admin':
return {"status": "fail", "msg": "失败:只有Admin可以进行此操作"}
lock = threading.Lock()
lock.acquire()
try:
scheduler.pause()
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败: {}".format(e)}
lock.release()
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'pause_scheduler', "all", 'success')
return {"status": "success", "msg": "成功:调度器已停止"}
elif args["method"] == "resume_scheduler":
if not session['username'] == 'Admin':
return {"status": "fail", "msg": "失败:只有Admin可以进行此操作"}
lock = threading.Lock()
lock.acquire()
try:
scheduler.resume()
except Exception as e:
lock.release()
return {"status": "fail", "msg": "失败: {}".format(e)}
lock.release()
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'resume_scheduler', "all", 'success')
return {"status": "success", "msg": "成功:调度器已恢复运行"}
elif args['method'] == "remove_myschedulejobs":
lock = threading.Lock()
lock.acquire()
try:
for job in scheduler.get_jobs():
(user,project,task_name) = job.id.split('#')
if user == session['username']:
scheduler.remove_job(job.id)
except Exception as e:
lock.release()
return {"status": "fail", "msg": "Fail: {}".format(e)}
lock.release()
res = self.app.config['DB'].runsql("DELETE from schedule_job where user='{}';".format(session['username']))
if res:
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'remove_myschedulejobs', 'all',
'success')
else:
self.app.config['DB'].insert_loginfo(session["username"], 'schedulejob', 'remove_myschedulejobs', 'all',
'DB Fail')
return {"status": "fail", "msg": "数据库操作失败"}
return {"status": "success", "msg": "删除任务成功"}
def get_task_list(app, username, project):
log = getlogger(__name__)
job_path = app.config["AUTO_HOME"] + "/jobs/%s/%s" % (username, project)
next_build = 0
task = []
if exists_path(job_path):
next_build = get_next_build_number(job_path)
if next_build != 0:
        # Iterate over all task results
        # Check the status of the most recent task
icons = {
"running": url_for('static', filename='img/running.gif'),
"success": url_for('static', filename='img/success.png'),
"fail": url_for('static', filename='img/fail.png'),
"exception": url_for('static', filename='img/exception.png')}
#if exists_path(job_path + "/%s" % (next_build - 1)):
running = False
lock = threading.Lock()
lock.acquire()
remove_robot(app)
for p in app.config["AUTO_ROBOT"]:
if p["name"] == project:
app.log.info("P name == project :{}".format(project))
running = True
break
lock.release()
if running:
task.append(
{
"status": icons["running"],
"name": "%s_#%s" % (project, next_build-1),
"success": "",
"fail": ""
}
)
last = 1
if running:
last = 2
for i in range(next_build-last, -1, -1):
output_path = job_path + "/%s" % i
if exists_path(output_path):
if exists_path(output_path + "/output.xml"): # robot 的标志性输出
try:
driver = get_taskdriver(output_path + "/cmd.txt")
suite = ExecutionResult(output_path + "/output.xml").suite
stat = suite.statistics
name = suite.name
if stat.failed != 0:
status = icons["fail"]
else:
status = icons['success']
task.append({
"task_no": i,
"status": status,
"name": "<a href='/view_report/%s/%s_log' target='_blank'>%s_#%s_log</a>" % (project, i, name, i),
"driver": driver,
"success": stat.passed,
"fail": stat.failed,
"starttime": suite.starttime,
"endtime": suite.endtime,
"elapsedtime": suite.elapsedtime,
"note": "<a href='/view_report/%s/%s_report' target='_blank'>%s_#%s_report</a>" % (project, i, name, i)
})
except:
status = icons["exception"]
if i == next_build-last:
status = icons["running"]
task.append({
"task_no": i,
"status": status,
"name": "%s_#%s" % (project, i),
"driver":driver,
"success": "-",
"fail": "-",
"starttime": "-",
"endtime": "-",
"elapsedtime": "-",
"note": "Abnormal"
})
continue
if exists_path(output_path + "/pytest_res.txt"): # pytest 的输出是 pytest_res.txt
driver = get_taskdriver(job_path + "/%s/cmd.txt" % i)
result = {}
with open(output_path + "/pytest_res.txt", 'r') as rf:
result = json.load(rf)
success = result["success"]
fail = result["fail"]
elapsedtime = result["duration"]
source = result["source"]
name = os.path.basename(source)
if result["fail"] > 0:
status = icons["fail"]
else:
status = icons["success"]
task.append({
"task_no": i,
"status": status,
"name": "<a href='/view_report/%s/%s_log' target='_blank'>%s_#%s_log</a>" % (
project, i, name, i),
"driver": driver,
"success": success,
"fail": fail,
"starttime": "unknown",
"endtime": "unknown",
"elapsedtime": elapsedtime,
"note": "<a href='/view_report/%s/%s_report' target='_blank'>%s_#%s_report</a>" % (
project, i, source, i)
})
else:
status = icons["exception"]
if i == next_build - last:
status = icons["running"]
task.append({
"task_no": i,
"status": status,
"name": "%s_#%s" % (project, i),
"driver": "unknown",
"success": "-",
"fail": "-",
"starttime": "-",
"endtime": "-",
"elapsedtime": "-",
"note": "Abnormal"
})
return {"total": next_build-1, "rows": task}
def get_schedulejob_list(app, args):
joblist = []
res = app.config['DB'].runsql('SELECT * from schedule_job;')
for i in res:
(user,project,task_no,task_name,method,schedule_type,
year,mon,day,hour,min,sec,week,
day_of_week,start_date,end_date,sponsor) = i
joblist.append([user,project,task_name,task_no,method,schedule_type,
year,mon,day,hour,min,sec,week,
day_of_week,start_date,end_date,sponsor,'unScheduled','']) #job_id = "{}#{}#{}".format(user,project,task_name)
jobs = scheduler.get_jobs()
jobids = [x.id for x in jobs]
for j in joblist:
id = j[0]+'#'+j[1]+'#'+j[2]
if id in jobids:
jb = scheduler.get_job(id)
j[18] = jb.next_run_time
j[17] = 'running' if j[18] is not None else 'pause'
jobids.remove(id)
for i in jobids:
(u,p,t) = i.split('#')
jb = scheduler.get_job(i)
        # Jobs known to the scheduler but missing from the schedule_job table
        joblist.append([u, p, t, '', '', '', '', '', '', '',
                        '', '', '', '', '', '', '',
                        'running' if jb.next_run_time is not None else 'pause', jb.next_run_time])
icons = {
"pause": url_for('static', filename='img/unknown.png'),
"running": url_for('static', filename='img/success.png'),
"unScheduled": url_for('static', filename='img/fail.png'),
"schedulerPaused": url_for('static', filename='img/innormal.png')
}
rlist = []
for j in joblist:
if j[17] == 'running':
status = icons['running']
elif j[17] == 'pause':
status = icons['pause']
else:
status = icons['unScheduled']
if scheduler.state == 2:
status = icons['schedulerPaused']
rlist.append(
{
"user": j[0],
"project": j[1],
"task_name": j[2],
#"task_no": j[3],
#"method": j[4],
"schedule_type": j[5],
"year": j[6],
"mon": j[7],
"day": j[8],
"hour": j[9],
"min": j[10],
"sec": j[11],
"week": j[12],
"day_of_week": j[13],
"start_date": j[14],
"end_date": j[15],
"sponsor": j[16],
"status": status,
"next_time": str(j[18])
}
)
return {"total": 1, "rows": rlist}
def get_last_task(app, username, project):
icons = {
"running": url_for('static', filename='img/running.gif'),
"success": url_for('static', filename='img/success.png'),
"fail": url_for('static', filename='img/fail.png'),
"exception": url_for('static', filename='img/exception.png')}
job_path = app.config["AUTO_HOME"] + "/jobs/%s/%s" % (username, project)
status = icons["running"]
if exists_path(job_path):
next_build = get_next_build_number(job_path)
last_job = next_build-1
if exists_path(job_path + "/%s" % last_job):
try:
suite = ExecutionResult(job_path + "/%s/output.xml" % last_job).suite
stat = suite.statistics
if stat.failed != 0:
status = icons["fail"]
else:
status = icons['success']
except:
status = icons["running"]
else:
status = icons["exception"]
else:
status = icons['success']
return status
def get_projecttask(app):
projects = app.config['DB'].get_allproject(session["username"])
task_list = {"total": len(projects), "rows": []}
for op in projects:
#p = op.split(':')[1] # projects = ["owner:project","o:p"]
task = {
# "status": status,
"project": op,
# "last_success": get_last_pass(job_path + "/lastPassed"),
# "last_fail": get_last_fail(job_path + "/lastFail"),
"enable": "Enalble",
"next_time": get_next_time(app, op),
"cron": "* * * * * *",
"status": get_last_task(app, session["username"], op)
}
task_list["rows"].append(task)
return task_list
def get_last_pass(job_path):
passed = "无"
passed_path = job_path + "lastPassed"
if exists_path(passed_path):
f = codecs.open(passed_path, "r", "utf-8")
passed = f.read()
f.close()
return passed
def get_last_fail(job_path):
fail = "无"
fail_path = job_path + "lastFail"
if exists_path(fail_path):
f = codecs.open(fail_path, "r", "utf-8")
fail = f.read()
f.close()
return fail
def get_next_build_number(job_path):
next_build_number = 1
next_path = job_path + "/nextBuildNumber"
if exists_path(next_path):
f = codecs.open(next_path, "r", "utf-8")
next_build_number = int(f.read())
f.close()
return next_build_number
def get_next_time(app, name):
job = scheduler.get_job("%s_%s" % (session["username"], name))
if job:
to_zone = tz.gettz("CST")
return job.next_run_time.astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")
else:
return "-"
def edit_cron(app, name, cron):
user_path = app.config["AUTO_HOME"] + "/users/" + session["username"]
if os.path.exists(user_path):
config = json.load(codecs.open(user_path + '/config.json', 'r', 'utf-8'))
index = 0
for p in config["data"]:
if p["name"] == name:
config["data"][index]["cron"] = cron
break
index += 1
json.dump(config, codecs.open(user_path + '/config.json', 'w', 'utf-8'))
return True
return False
def get_projecttaskdir(app, project):
    # TODO: adapt for multi-user shared projects
projecttaskdir = app.config["AUTO_HOME"] + "/jobs/" + session["username"] + "/%s" % (project)
return projecttaskdir
def get_taskdriver(cmdfile):
if not os.path.exists(cmdfile):
return 'Unknown'
else:
with open(cmdfile, 'r') as f:
ln = f.readline().strip()
splits = ln.split('|')
return splits[0] if len(splits) > 1 else 'Unknown'
|
py | b4061026a6a8b51049627ed8329e242fb6293107 | # 8888888888 .d88888b. 8888888b. .d88888b.
# 888 d88P" "Y88b 888 "Y88b d88P" "Y88b
# 888 888 888 888 888 888 888
# 888 888 888 888 888 888 888
# 888 888 888 888 888 888 888
# 888 888 888 888 888 888 888
# 888 Y88b. .d88P 888 .d88P Y88b. .d88P
# 888 "Y88888P" 8888888P" "Y888888"
# push button to remove the inactive nodes
# Use Qt threads
# single node refresh
# store data
# display data on graphs
import re
import sys
import qdarkstyle
from time import sleep, time
from threading import Lock, Thread
from PyQt5 import QtGui, QtWidgets, QtCore
from serial_interface import serial_constants as S_C
# pylint: disable=no-name-in-module
from PyQt5.QtWidgets import QApplication, QMainWindow
from GUI.Employee_overview_ui import Ui_Employees_Overview
from serial_interface.overview_processing import OverviewProcessing
import serial_interface.data_logger as D_L
###############################################################################
## Class inheriting from QPushButton to animate the blinking
##
class AnimatedButton(QtWidgets.QPushButton):
def __init__(self, darkmode):
QtWidgets.QPushButton.__init__(self)
self.darkmode = darkmode
if (self.darkmode):
color1 = QtGui.QColor(0x31, 0x36, 0x3b)
color2 = QtGui.QColor("black")
else:
color1 = QtGui.QColor(0xe1, 0xe1, 0xe1)
color2 = QtGui.QColor("red")
self.co_get = 0
self.co_set = 0
byar = QtCore.QByteArray()
byar.append('zcolor')
self.color_anim = QtCore.QPropertyAnimation(self, byar)
self.color_anim.setStartValue(color1)
self.color_anim.setKeyValueAt(0.5, color2)
self.color_anim.setEndValue(color1)
self.color_anim.setDuration(900)
self.color_anim.setLoopCount(7)
self.custom_anim = QtCore.QPropertyAnimation(self, byar)
    # parse the current stylesheet into its individual rules
def parseStyleSheet(self):
ss = self.styleSheet()
sts = [s.strip() for s in ss.split(';') if len(s.strip())]
return sts
# get the background color of the current palette
def getBackColor(self):
self.co_get += 1
return self.palette().color(self.pal_ele)
    # Set the new background color
def setBackColor(self, color):
self.co_set += 1
sss = self.parseStyleSheet()
bg_new = 'background-color: rgba(%d,%d,%d,%d);' % (color.red(),
color.green(), color.blue(), color.alpha())
for k, sty in enumerate(sss):
if re.search(r'\Abackground-color:', sty):
sss[k] = bg_new
break
else:
sss.append(bg_new)
self.setStyleSheet('; '.join(sss))
def gray_out(self):
self.setEnabled(False)
def un_gray_out(self):
self.setDisabled(False)
pal_ele = QtGui.QPalette.Window
zcolor = QtCore.pyqtProperty(QtGui.QColor, getBackColor, setBackColor)
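# Illustrative usage sketch (not part of the application flow; assumes a
# QApplication already exists). The animation drives the stylesheet background
# through the custom 'zcolor' property, so a single start() call blinks the
# button 7 times over roughly 6.3 seconds (900 ms per loop, 7 loops):
#   btn = AnimatedButton(darkmode=0)
#   btn.color_anim.start()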
###############################################################################
## Main GUI window
##
class MyMainWindow(QMainWindow, Ui_Employees_Overview):
def __init__(self, darkmode=0, parent=None):
super(MyMainWindow, self).__init__(parent=None)
self.setupUi(self)
self.buttonGroup = QtWidgets.QButtonGroup(self)
self.buttonGroup.buttonClicked.connect(self.handleButtonClicked)
        # Window icon setup
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("GUI/gui_image/drop_icon.jpg"),
QtGui.QIcon.Normal, QtGui.QIcon.On)
self.setWindowIcon(icon)
        # Window image setup
QPixmap_path = "GUI/gui_image/water_bottle.jpg"
self.LB_image.setPixmap(QtGui.QPixmap(QPixmap_path))
        # Push button signal/slot setup
self.PB_report .clicked.connect(self.PB_report_f)
self.PB_notif_all .clicked.connect(self.PB_notif_all_f)
self.PB_drink .clicked.connect(self.PB_drink_f)
self.PB_connect_nrf .clicked.connect(self.PB_connect_nrf_f)
self.PB_save .clicked.connect(self.PB_save_f)
self.PB_import .clicked.connect(self.PB_import_f)
self.PB_set_name .clicked.connect(self.PB_set_name_f)
self.darkmode = darkmode
        # instantiate the main processing object
self.overview_processing = OverviewProcessing()
self.empl_button_dict = dict()
self.empl_dict_temp = dict()
# try a first connection to the nrf
self.timer = QtCore.QTimer()
# self.timer.timeout.connect(self.update)
self.timer.start(1000) #trigger every sec.
self.timer.timeout.connect(self.timer_refresh_UI)
self.selected_employee_addr = ""
self.blink_tr = None
connect_nrf(self)
D_L.init_logger()
print("\nOVERVIEW Starting #################\n\n\r")
###########################################################################
# Class functions
##
def handleButtonClicked(self, button):
for item in self.buttonGroup.buttons():
if button is item:
PB_employee_f(self, item.objectName())
def PB_report_f(self):
send_notif_cmd(self)
def PB_notif_all_f(self):
send_notif_all_cmd(self)
def PB_drink_f(self):
send_drink_cmd(self)
def PB_connect_nrf_f(self):
connect_nrf(self)
# def PB_set_path_f(self):
# set_config_file(self)
def PB_set_name_f(self):
showDial(self)
def PB_save_f(self):
save_config(self)
def PB_import_f(self):
import_config(self)
def timer_refresh_UI(self):
refresh_UI(self)
###############################################################################
## end class MyMainWindow #####################################################
###############################################################################
# function to display another form
###############################################################################
## Display an input dialog to change the selected employee's name
##
def showDial(M_W):
if (M_W.selected_employee_addr == ""):
print("no employee selected")
else:
text, okPressed = QtWidgets.QInputDialog.getText(M_W, "",
"Employee name:", QtWidgets.QLineEdit.Normal, "")
if okPressed and text != '':
if (M_W.selected_employee_addr != ""):
M_W.overview_processing.config_employee.employee_dict[ \
M_W.selected_employee_addr].name = text
###############################################################################
## Refresh the UI
##
def refresh_UI(M_W):
refresh_all(M_W)
    # TODO: implement a single-node, queue-based refresh instead of refreshing everything
# refresh_all_cnt = 0
# refresh_node(M_W)
# refresh_all_cnt = (refresh_all_cnt + 1) % 5
# if (not refresh_all_cnt):
# refresh_all(M_W)
###############################################################################
## to be implemented
## refresh a single node
##
def refresh_node(M_W):
pass
###############################################################################
## Refresh all nodes in a row; this is a rather heavy function, so it should
## not be called too often
##
def refresh_all(M_W):
    # temporary copy of the employee dict
M_W.empl_dict_temp = dict(
M_W.overview_processing.config_employee.employee_dict)
# run through the temporary dict
for empl_key in list(M_W.empl_dict_temp):
empl_value = M_W.empl_dict_temp[empl_key]
        # add the employee to the button dict if it does not exist
if empl_key not in M_W.empl_button_dict:
M_W.empl_button_dict[empl_key] = empl_value
add_push_button(M_W, str(empl_key), empl_value)
# else simply refresh the employee
else:
M_W.empl_button_dict[empl_key] = empl_value
refresh_push_button(M_W, str(empl_key), empl_value)
if (empl_value.notif):
set_notif(M_W, empl_key)
        # refresh the employee's detailed info
if (empl_key == M_W.selected_employee_addr):
PB_employee_f(M_W, empl_key)
D_L.log_to_file(empl_value)
###############################################################################
## display a notification emanating from an employee by blinking his
## dedicated push button
##
def set_notif (M_W, empl_key):
blink(M_W, empl_key)
M_W.overview_processing.config_employee.un_notify(empl_key)
employee = M_W.empl_button_dict[empl_key]
employee.notif = 0
M_W.empl_button_dict[empl_key] = employee
###############################################################################
## Dynamically add a push button to the employee list; the push buttons
## are contained in a push button group
##
def add_push_button(M_W, button_name, employee):
BT_name = button_name
# empl_button = QtWidgets.QPushButton(BT_name, M_W.SAWidget_employee_list)
empl_button = AnimatedButton(M_W.darkmode)
empl_button.setObjectName(BT_name)
empl_button_txt = get_push_button_txt(employee)
empl_button.setText(empl_button_txt)
M_W.verticalLayout.addWidget(empl_button)
    # link each employee button to its handler
for button in M_W.VGB_employees.findChildren(AnimatedButton):
# for button in M_W.VGB_employees.findChildren(QtWidgets.QPushButton):
if (M_W.buttonGroup.id(button) < 0):
M_W.buttonGroup.addButton(button)
###############################################################################
## refresh a single node's info and gray it out if inactive
##
def refresh_push_button(M_W, button_name, employee):
for button in M_W.VGB_employees.findChildren(QtWidgets.QPushButton):
if (button.objectName() == button_name):
empl_key = button_name
# empl_button_txt = str(employee.name) + " " + \
# str(int(employee.dehydration))
empl_button_txt = get_push_button_txt(employee)
button.setText(empl_button_txt)
if (not is_up_to_date(M_W, empl_key)):
button.gray_out()
else:
button.un_gray_out()
###############################################################################
## Format employee info for the push button
##
def get_push_button_txt(employee):
    return str(employee.name) + ' ' + \
        "\n\rSkin Temperature " + \
        str(int(employee.skin_temperature)) + '°C ' + \
        "\n\rAmbient Temperature " + \
        str(int(employee.ambiant_temperature)) + '°C ' + \
        "\n\rAmbient Humidity " + \
        str(int(employee.ambiant_humidity)) + '%'
###############################################################################
## Check if a single node has been inactive for too long
##
def is_up_to_date(M_W, empl_key):
empl_date = M_W.empl_button_dict[empl_key].last_activity
time_now = time()
delta_time = time_now - empl_date
if (delta_time >= 10):
return 0
else:
return 1
###############################################################################
## Function called by the employee push button list; it refreshes the
## detailed info about the selected employee, triggered either by a
## push button click or by a refresh-all for the selected employee
##
def PB_employee_f(M_W, PB_name):
employee = M_W.empl_button_dict[PB_name]
update_selected_employee_info( M_W,
employee.name,
PB_name,
employee.ambiant_temperature,
employee.ambiant_humidity,
employee.skin_temperature)
###############################################################################
# Test and Connect the serial UART connection
##
def connect_nrf(M_W):
ret = M_W.overview_processing.nrf_serial.connect_nrf()
if (ret):
M_W.LB_status_val.setText("Connected")
else:
M_W.LB_status_val.setText("Not Connected")
###############################################################################
# Update the labels containing the info of the current
# selected employee
# arg name: employee name
# arg address: employee node address
# arg ambiant_temperature / ambiant_humidity / skin_temperature: sensor values
##
def update_selected_employee_info(M_W, name, address,
ambiant_temperature, ambiant_humidity, skin_temperature):
M_W.selected_employee_addr = address
M_W.LB_name_val .setText(name)
M_W.LB_select_val .setText(str(address))
M_W.LB_dehydration_val .setText(str(int(skin_temperature))+'°C')
M_W.LB_temperature_val .setText(str(int(ambiant_temperature))+'°C')
M_W.LB_humidity_val .setText(str(int(ambiant_humidity))+'%')
###############################################################################
## Save the configuration info of the employee (name and other data)
##
def save_config(M_W):
save_path = QtWidgets.QFileDialog.getSaveFileName()
if (save_path[0] != ""):
M_W.overview_processing.config_employee.save_config(save_path[0])
###############################################################################
## Import a configuration, useful if we don't want to retype the employee
## name each time
##
def import_config(M_W):
import_path = QtWidgets.QFileDialog.getOpenFileName()
if (import_path[0] != ""):
M_W.overview_processing.config_employee.import_config(import_path[0])
###############################################################################
## Blink a custom push button in the employee list
##
def blink(M_W, empl_key):
for button in M_W.VGB_employees.findChildren(AnimatedButton):
if (button.objectName() == empl_key):
button.color_anim.stop()
button.color_anim.start()
###############################################################################
## send drink command notification to the currently selected node
##
def send_drink_cmd(M_W):
if (M_W.selected_employee_addr != ""):
M_W.overview_processing.nrf_serial.send_to_node(
M_W.selected_employee_addr, S_C.DRINK_NOTIF)
else:
M_W.LB_name_val.setText("No employee selected")
###############################################################################
## send general command notification to the currently selected node
##
def send_notif_cmd(M_W):
if (M_W.selected_employee_addr != ""):
M_W.overview_processing.nrf_serial.send_to_node(
M_W.selected_employee_addr, S_C.NOTIF)
else:
M_W.LB_name_val.setText("No employee selected")
###############################################################################
## send notif to ALL nodes
##
def send_notif_all_cmd(M_W):
M_W.overview_processing.nrf_serial.send_to_all(S_C.NOTIF_ALL)
###############################################################################
## MAIN
##
def main():
app = QApplication(sys.argv)
darkmode = 0
arguments = sys.argv[1:]
count = len(arguments)
if (count == 0):
pass
elif ((count == 1) and (sys.argv[1] == "-dark")):
# Set dark theme
darkmode = 1
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
else:
print("\n\r USAGE: python main.py [-dark] \n\r")
return 1
w = MyMainWindow(darkmode)
# w = MyMainWindow()
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
py | b4061074c39ef46ca5982d0b1bfa2e033118e7b3 | import unittest
from qiskit.circuit import QuantumCircuit, ParameterVector
from surfer.tools.unroll_parameterized_gates import UnrollParameterizedGates
class TestUnrollParameterized(unittest.TestCase):
"""Test the pass to unroll parameterized gates."""
def setUp(self):
super().setUp()
self.supported_gates = ["rx", "ry", "rz", "cp", "crx", "cry", "crz"]
def test_only_parameterized_is_unrolled(self):
"""Test only parameterized gates are unrolled."""
x = ParameterVector("x", 4)
block1 = QuantumCircuit(1)
block1.rx(x[0], 0)
sub_block = QuantumCircuit(2)
sub_block.cx(0, 1)
sub_block.rz(x[2], 0)
block2 = QuantumCircuit(2)
block2.ry(x[1], 0)
block2.append(sub_block.to_gate(), [0, 1])
block3 = QuantumCircuit(3)
block3.ccx(0, 1, 2)
circuit = QuantumCircuit(3)
circuit.append(block1.to_gate(), [1])
circuit.append(block2.to_gate(), [0, 1])
circuit.append(block3.to_gate(), [0, 1, 2])
circuit.cry(x[3], 0, 2)
unroller = UnrollParameterizedGates(self.supported_gates)
unrolled = unroller(circuit)
expected = QuantumCircuit(3)
expected.rx(x[0], 1)
expected.ry(x[1], 0)
expected.cx(0, 1)
expected.rz(x[2], 0)
expected.append(block3.to_gate(), [0, 1, 2])
expected.cry(x[3], 0, 2)
self.assertEqual(unrolled, expected)
|
py | b406116863385d89c7fc00c850fb52d519e7dd18 | from shape import Shape
from point import Point
class Square(Shape):
"""
A square Shape.
"""
def __init__(self, corners):
"""
Create Square self with vertices corners.
Assume all sides are equal and corners are square.
@param Square self: this Square object
@param list[Point] corners: corners that define this Square
@rtype: None
        >>> s = Square([Point(0, 0), Point(1, 0), Point(1, 1), Point(0, 1)])
"""
Shape.__init__(self, corners)
def _set_area(self):
"""
Set Square self's area.
Overrides Shape._set_area
@type self: Square
@rtype: float
>>> s = Square([Point(0,0), Point(10,0), Point(10,10), Point(0,10)])
>>> s.area
100.0
"""
self._area = self.corners[-1].distance(self.corners[0])**2
if __name__ == '__main__':
import doctest
doctest.testmod()
s = Square([Point(0, 0)])
# Pycharm flags these,
# as it should.
# print(s.corners + "three")
# print(s.area + "three") |
py | b4061230f8cef3472664ce73f575e35fa8d61cf7 | from django import template
register = template.Library()
@register.filter("can_user_challenge")
def can_user_challenge(debate, user):
return debate.can_user_challenge(user)
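# Template usage sketch (illustrative only): assumes `debate` and `user` are in
# the template context and that this module is loaded under its tag-library
# name, shown here as the hypothetical "debate_extras":
#   {% load debate_extras %}
#   {% if debate|can_user_challenge:user %} ... {% endif %}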
|
py | b40612b9664209ee45974282b2fe7be41f728d46 | import numpy as np
import cv2
import tensorflow as tf
from collections import Counter
from textdect.convertmodel import ConvertedModel
from misc.freezemodel import FreezedModel
from mesgclsf.datapreptools import resize_to_desired
from mesgclsf.s2train import FEATURE_HEIGHT, FEATURE_WIDTH
def classify(classifier, session, img_arr, stride=16):
"""
    Take an image array (an output from the first step), analyze it, and
    return the class ID of the input image based on the message content on it.
Args:
classifier: A FreezedModel constructed based on a trained model for step 2.
session: The TensorFlow session.
img_arr: A numpy ndarray that holds the image features.
stride: Optional. The stride of the sliding.
Returns:
class_id: Message class ID of the input image.
        confidence: A percentage indicating how many of the sliding-window slices
            were predicted as the class identified by class_id.
"""
height, width = FEATURE_HEIGHT, FEATURE_WIDTH
resized_img = resize_to_desired(img_arr)
img_height, img_width = resized_img.shape
assert img_height == height
features = []
for x in range(0, img_width - FEATURE_WIDTH + 1, stride):
this_win = resized_img[:, x:x + FEATURE_WIDTH]
features.append(this_win.reshape(-1))
_, indices = classifier.predict(session, np.asarray(features))
img_cnt = len(features)
cls_list = []
for i in range(img_cnt):
cls_list.append(indices[i][0])
class_id, pos_cnt = Counter(cls_list).most_common()[0]
confidence = (pos_cnt / img_cnt) * 100.0
return class_id, confidence
def detect_and_classify(detector, classifier, session, image_array, debug=False):
boxes = detector.predict(session, image_array)
for box in boxes:
xmin, ymin, xmax, ymax = box.get_coordinates()
gry_arr = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)
area_img = gry_arr[ymin:ymax, xmin:xmax]
cls_id, conf = classify(classifier, session, area_img)
if debug:
cv2.rectangle(image_array, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)
print("class ID: {}, confidence: {}".format(cls_id, conf))
if __name__ == "__main__":
import cv2
import json
import os
import matplotlib.pyplot as plt
from time import time
from settings import PROJECT_ROOT
t0 = time()
model_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
s1_model = 's1_keras_model.pb'
lss_model = 's2_lss_model.pb'
tas_model = 's2_tas_model.pb'
sign_type = 'TAS' # LSS or TAS
img_file = os.path.join(PROJECT_ROOT, 'Data', 'Step1', 'Test', 'sign1.jpg')
img_arr = cv2.imread(img_file)
s1_config_file = os.path.join(PROJECT_ROOT, 'textdect', 'config.json')
with open(s1_config_file) as config_buffer:
s1_config = json.loads(config_buffer.read())
with tf.Graph().as_default() as graph:
detector = ConvertedModel(s1_config, graph, 's1_keras', model_dir, s1_model)
lss_classifier = FreezedModel(graph, 's2_lss', model_dir, lss_model)
tas_classifier = FreezedModel(graph, 's2_tas', model_dir, tas_model)
with tf.Session(graph=graph) as sess:
if sign_type == 'LSS':
detect_and_classify(detector, lss_classifier, sess, img_arr, debug=True)
else:
detect_and_classify(detector, tas_classifier, sess, img_arr, debug=True)
t1 = time()
print("Running time: {:4.2f} seconds".format(t1-t0))
plt.imshow(img_arr)
plt.show()
|
py | b40612d51ca983c3f185a203ca3306816564da54 | import os, random, struct
import py
from rpython.jit.backend.x86 import rx86
from rpython.rlib.rarithmetic import intmask
from rpython.tool.udir import udir
INPUTNAME = 'checkfile_%s.s'
FILENAME = 'checkfile_%s.o'
BEGIN_TAG = '<<<rx86-test-begin>>>'
END_TAG = '<<<rx86-test-end>>>'
class CodeCheckerMixin(object):
def __init__(self, expected, accept_unnecessary_prefix):
self.expected = expected
self.accept_unnecessary_prefix = accept_unnecessary_prefix
self.index = 0
def begin(self, op):
self.op = op
self.instrindex = self.index
def writechar(self, char):
if char != self.expected[self.index:self.index+1]:
if (char == self.accept_unnecessary_prefix
and self.index == self.instrindex):
return # ignore the extra character '\x40'
print self.op
print "\x09from rx86.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..."
print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..."
raise Exception("Differs")
self.index += 1
def done(self):
assert len(self.expected) == self.index
def stack_frame_size_delta(self, delta):
pass # ignored
def check_stack_size_at_ret(self):
pass # ignored
def hexdump(s):
return ' '.join(["%02X" % ord(c) for c in s])
def reduce_to_32bit(s):
if s[:2] != '%r':
return s
if s[2:].isdigit():
return s + 'd'
else:
return '%e' + s[2:]
# ____________________________________________________________
COUNT1 = 15
suffixes = {0:'', 1:'b', 2:'w', 4:'l', 8:'q'}
class TestRx86_32(object):
WORD = 4
TESTDIR = 'rx86_32'
X86_CodeBuilder = rx86.X86_32_CodeBuilder
REGNAMES = ['%eax', '%ecx', '%edx', '%ebx', '%esp', '%ebp', '%esi', '%edi']
REGNAMES8 = ['%al', '%cl', '%dl', '%bl', '%ah', '%ch', '%dh', '%bh']
XMMREGNAMES = ['%%xmm%d' % i for i in range(16)]
REGS = range(8)
REGS8 = [i|rx86.BYTE_REG_FLAG for i in range(8)]
NONSPECREGS = [rx86.R.eax, rx86.R.ecx, rx86.R.edx, rx86.R.ebx,
rx86.R.esi, rx86.R.edi]
accept_unnecessary_prefix = None
methname = '?'
def reg_tests(self):
return self.REGS
def reg8_tests(self):
return self.REGS8
def xmm_reg_tests(self):
return self.reg_tests()
def stack_bp_tests(self, count=COUNT1):
return ([0, 4, -4, 124, 128, -128, -132] +
[random.randrange(-0x20000000, 0x20000000) * 4
for i in range(count)])
def stack_sp_tests(self, count=COUNT1):
return ([0, 4, 124, 128] +
[random.randrange(0, 0x20000000) * 4
for i in range(count)])
def memory_tests(self):
return [(reg, ofs)
for reg in self.NONSPECREGS
for ofs in self.stack_bp_tests(5)
]
def array_tests(self):
return [(reg1, reg2, scaleshift, ofs)
for reg1 in self.NONSPECREGS
for reg2 in self.NONSPECREGS
for scaleshift in [0, 1, 2, 3]
for ofs in self.stack_bp_tests(1)
]
def imm8_tests(self):
v = ([-128,-1,0,1,127] +
[random.randrange(-127, 127) for i in range(COUNT1)])
return v
def imm32_tests(self):
v = ([-0x80000000, 0x7FFFFFFF, 128, 256, -129, -255] +
[random.randrange(-32768,32768)<<16 |
random.randrange(0,65536) for i in range(COUNT1)] +
[random.randrange(128, 256) for i in range(COUNT1)])
return self.imm8_tests() + v
def relative_tests(self):
py.test.skip("explicit test required for %r" % (self.methname,))
def get_all_tests(self):
return {
'r': self.reg_tests,
'r8': self.reg8_tests,
'x': self.xmm_reg_tests,
'b': self.stack_bp_tests,
's': self.stack_sp_tests,
'm': self.memory_tests,
'a': self.array_tests,
'i': self.imm32_tests,
'i8': self.imm8_tests,
'j': self.imm32_tests,
'l': self.relative_tests,
}
def assembler_operand_reg(self, regnum):
return self.REGNAMES[regnum]
def assembler_operand_reg8(self, regnum):
assert regnum & rx86.BYTE_REG_FLAG
return self.REGNAMES8[regnum &~ rx86.BYTE_REG_FLAG]
def assembler_operand_xmm_reg(self, regnum):
return self.XMMREGNAMES[regnum]
def assembler_operand_stack_bp(self, position):
return '%d(%s)' % (position, self.REGNAMES[5])
def assembler_operand_stack_sp(self, position):
return '%d(%s)' % (position, self.REGNAMES[4])
def assembler_operand_memory(self, (reg1, offset)):
if not offset: offset = ''
return '%s(%s)' % (offset, self.REGNAMES[reg1])
def assembler_operand_array(self, (reg1, reg2, scaleshift, offset)):
if not offset: offset = ''
return '%s(%s,%s,%d)' % (offset, self.REGNAMES[reg1],
self.REGNAMES[reg2], 1<<scaleshift)
def assembler_operand_imm(self, value):
return '$%d' % value
def assembler_operand_imm_addr(self, value):
return '%d' % value
def get_all_assembler_operands(self):
return {
'r': self.assembler_operand_reg,
'r8': self.assembler_operand_reg8,
'x': self.assembler_operand_xmm_reg,
'b': self.assembler_operand_stack_bp,
's': self.assembler_operand_stack_sp,
'm': self.assembler_operand_memory,
'a': self.assembler_operand_array,
'i': self.assembler_operand_imm,
'i8': self.assembler_operand_imm,
'j': self.assembler_operand_imm_addr,
}
def run_test(self, methname, instrname, argmodes, args_lists,
instr_suffix=None):
global labelcount
labelcount = 0
oplist = []
testdir = udir.ensure(self.TESTDIR, dir=1)
inputname = str(testdir.join(INPUTNAME % methname))
filename = str(testdir.join(FILENAME % methname))
g = open(inputname, 'w')
g.write('\x09.string "%s"\n' % BEGIN_TAG)
#
if instrname == 'MOVDQ':
if self.WORD == 8:
instrname = 'MOVQ'
else:
instrname = 'MOVD'
if argmodes == 'xb':
py.test.skip('"as" uses an undocumented alternate encoding??')
if argmodes == 'xx' and self.WORD != 8:
instrname = 'MOVQ'
#
for args in args_lists:
suffix = ""
if (argmodes and not self.is_xmm_insn
and not instrname.startswith('FSTP')
and not instrname.startswith('FLD')):
suffix = suffixes[self.WORD]
# Special case: On 64-bit CPUs, rx86 assumes 64-bit integer
# operands when converting to/from floating point, so we need to
# indicate that with a suffix
if (self.WORD == 8) and (instrname.startswith('CVT') and
'SI' in instrname):
suffix = suffixes[self.WORD]
if instr_suffix is not None:
suffix = instr_suffix # overwrite
following = ""
if False: # instr.indirect:
suffix = ""
if args[-1][0] == i386.REL32: #in (i386.REL8,i386.REL32):
labelcount += 1
following = "\nL%d:" % labelcount
elif args[-1][0] in (i386.IMM8,i386.IMM32):
args = list(args)
args[-1] = ("%d", args[-1][1]) # no '$' sign
else:
suffix += " *"
k = -1
else:
k = len(args)
#for m, extra in args[:k]:
# assert m != i386.REL32 #not in (i386.REL8,i386.REL32)
assembler_operand = self.get_all_assembler_operands()
ops = []
for mode, v in zip(argmodes, args):
ops.append(assembler_operand[mode](v))
ops.reverse()
#
if (instrname.lower() == 'mov' and suffix == 'q' and
ops[0].startswith('$') and 0 <= int(ops[0][1:]) <= 4294967295
and ops[1].startswith('%r')):
# movq $xxx, %rax => movl $xxx, %eax
suffix = 'l'
ops[1] = reduce_to_32bit(ops[1])
#
op = '\t%s%s %s%s' % (instrname.lower(), suffix,
', '.join(ops), following)
g.write('%s\n' % op)
oplist.append(op)
g.write('\t.string "%s"\n' % END_TAG)
g.close()
f, g = os.popen4('as --%d "%s" -o "%s"' %
(self.WORD*8, inputname, filename), 'r')
f.close()
got = g.read()
g.close()
error = [line for line in got.splitlines() if 'error' in line.lower()]
if error:
raise Exception("Assembler got an error: %r" % error[0])
error = [line for line in got.splitlines()
if 'warning' in line.lower()]
if error:
raise Exception("Assembler got a warning: %r" % error[0])
try:
f = open(filename, 'rb')
except IOError:
raise Exception("Assembler did not produce output?")
data = f.read()
f.close()
i = data.find(BEGIN_TAG)
assert i>=0
j = data.find(END_TAG, i)
assert j>=0
as_code = data[i+len(BEGIN_TAG)+1:j]
return oplist, as_code
def make_all_tests(self, methname, modes, args=[]):
if modes:
tests = self.get_all_tests()
m = modes[0]
if m == 'p' and self.WORD == 4:
return []
lst = tests[m]()
random.shuffle(lst)
if methname == 'PSRAD_xi' and m == 'i':
lst = [x for x in lst if 0 <= x <= 31]
result = []
for v in lst:
result += self.make_all_tests(methname, modes[1:], args+[v])
return result
else:
# special cases
if methname in ('ADD_ri', 'AND_ri', 'CMP_ri', 'OR_ri',
'SUB_ri', 'XOR_ri', 'SBB_ri'):
if args[0] == rx86.R.eax:
return [] # ADD EAX, constant: there is a special encoding
if methname in ('CMP8_ri',):
if args[0] == rx86.R.al:
return [] # CMP AL, constant: there is a special encoding
if methname == 'XCHG_rr' and rx86.R.eax in args:
return [] # special encoding
if methname == 'MOV_rj' and args[0] == rx86.R.eax:
return [] # MOV EAX, [immediate]: there is a special encoding
if methname == 'MOV_jr' and args[1] == rx86.R.eax:
return [] # MOV [immediate], EAX: there is a special encoding
if methname == 'MOV8_rj' and args[0] == rx86.R.al:
return [] # MOV AL, [immediate]: there is a special encoding
if methname == 'MOV8_jr' and args[1] == rx86.R.al:
return [] # MOV [immediate], AL: there is a special encoding
return [args]
def get_code_checker_class(self):
class X86_CodeBuilder(CodeCheckerMixin, self.X86_CodeBuilder):
pass
return X86_CodeBuilder
def should_skip_instruction(self, instrname, argmodes):
is_artificial_instruction = (argmodes != '' and argmodes[-1].isdigit())
is_artificial_instruction |= (instrname[-1].isdigit() and
instrname[-1] != '8')
return (
is_artificial_instruction or
            # XXX: Can't test shifts automatically at the moment
(instrname[:3] in ('SHL', 'SAR', 'SHR')) or
# CALL_j is actually relative, so tricky to test
(instrname == 'CALL' and argmodes == 'j') or
# SET_ir must be tested manually
(instrname == 'SET' and argmodes == 'ir') or
# MULTIBYTE_NOPs can't easily be tested the same way
(instrname == 'MULTIBYTE')
)
def should_skip_instruction_bit32(self, instrname, argmodes):
if self.WORD != 8:
# those are tested in the 64 bit test case
return (
# the test suite uses 64 bit registers instead of 32 bit...
(instrname == 'PEXTRQ') or
(instrname == 'PINSRQ')
)
return False
def complete_test(self, methname):
if '_' in methname:
instrname, argmodes = methname.split('_')
else:
instrname, argmodes = methname, ''
if self.should_skip_instruction(instrname, argmodes) or \
self.should_skip_instruction_bit32(instrname, argmodes):
print "Skipping %s" % methname
return
# XXX: ugly way to deal with the differences between 32 and 64 bit
if not hasattr(self.X86_CodeBuilder, methname):
return
# XXX: hack hack hack
if methname == 'WORD':
return
if instrname.endswith('8'):
instrname = instrname[:-1]
if instrname == 'MOVSX' or instrname == 'MOVZX':
instr_suffix = 'b' + suffixes[self.WORD]
instrname = instrname[:-1]
if argmodes[1] == 'r':
argmodes = [argmodes[0], 'r8']
else:
instr_suffix = 'b'
realargmodes = []
for mode in argmodes:
if mode == 'r':
mode = 'r8'
elif mode == 'i':
mode = 'i8'
realargmodes.append(mode)
argmodes = realargmodes
elif instrname == 'CALL' or instrname == 'JMP':
instr_suffix = suffixes[self.WORD] + ' *'
else:
instr_suffix = None
if instrname.find('EXTR') != -1 or \
instrname.find('INSR') != -1 or \
instrname.find('INSERT') != -1 or \
instrname.find('EXTRACT') != -1 or \
instrname.find('SRLDQ') != -1 or \
instrname.find('SHUF') != -1 or \
instrname.find('PBLEND') != -1 or \
instrname.find('CMPP') != -1:
realargmodes = []
for mode in argmodes:
if mode == 'i':
mode = 'i8'
realargmodes.append(mode)
argmodes = realargmodes
print "Testing %s with argmodes=%r" % (instrname, argmodes)
self.methname = methname
self.is_xmm_insn = getattr(getattr(self.X86_CodeBuilder,
methname), 'is_xmm_insn', False)
ilist = self.make_all_tests(methname, argmodes)
oplist, as_code = self.run_test(methname, instrname, argmodes, ilist,
instr_suffix)
cls = self.get_code_checker_class()
cc = cls(as_code, self.accept_unnecessary_prefix)
for op, args in zip(oplist, ilist):
if op:
cc.begin(op)
getattr(cc, methname)(*args)
cc.done()
def setup_class(cls):
import os
g = os.popen('as -version </dev/null -o /dev/null 2>&1')
data = g.read()
g.close()
if not data.startswith('GNU assembler'):
py.test.skip("full tests require the GNU 'as' assembler")
def test_all(self):
for name in rx86.all_instructions:
yield self.complete_test, name
|
py | b40612ff914d58fdfc156c72ef60b1ec2de2e8f0 | from pathlib import Path
from django.test import TestCase
from papermerge.core.models import (
Document,
Folder,
ColoredTag
)
from papermerge.test.utils import create_root_user
# points to papermerge.testing folder
BASE_DIR = Path(__file__).parent
class TestDocument(TestCase):
def setUp(self):
self.user = create_root_user()
def test_basic_document_tagging(self):
doc = Document.create_document(
title="document_c",
file_name="document_c.pdf",
size='1212',
lang='DEU',
user=self.user,
page_count=5,
)
doc.save()
# associate "invoice" and "paid" tags
        # both tags belong to self.user
doc.tags.add(
"invoice",
"paid",
tag_kwargs={"user": self.user}
)
# If you’re filtering on multiple tags, it’s very common to get
# duplicate results,
# because of the way relational databases work. Often
# you’ll want to make use of the distinct() method on QuerySets.
found_docs = Document.objects.filter(
tags__name__in=["paid", "invoice"]
).distinct()
self.assertEquals(
found_docs.count(),
1
)
self.assertEquals(
found_docs.first().title,
"document_c"
)
def test_basic_folder_tagging(self):
folder = Folder.objects.create(
title="Markus",
user=self.user
)
folder.tags.add(
"invoices",
tag_kwargs={"user": self.user}
)
found_folders = Folder.objects.filter(
tags__name__in=["invoices"]
)
self.assertEquals(
found_folders.count(),
1
)
self.assertEquals(
found_folders.first().title,
"Markus"
)
|
py | b406130ce24b03b60b24f896d185217a2a1593f4 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import json
import os
import sys
import numpy as np
import tensorflow as tf
from sqlflow_submitter.db import (buffered_db_writer, connect_with_data_source,
db_generator, parseMaxComputeDSN)
from .fast_predict import FastPredict
from .input_fn import (get_dtype, pai_maxcompute_db_generator,
pai_maxcompute_input_fn, parse_sparse_feature,
parse_sparse_feature_predict)
# Disable Tensorflow INFO and WARNING logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
try:
import sqlflow_models
except:
pass
# TODO(shendiaomo): Remove after we fully upgrade to TF2.0
TF_VERSION_2 = True
TF_VERSION_PARTS = tf.__version__.split(".")
if int(TF_VERSION_PARTS[0]) == 1:
TF_VERSION_2 = False
# Disable Tensorflow INFO and WARNING logs
if TF_VERSION_2:
import logging
tf.get_logger().setLevel(logging.ERROR)
else:
tf.logging.set_verbosity(tf.logging.ERROR)
from .pai_distributed import define_tf_flags, make_distributed_info_without_evaluator, dump_into_tf_config
def keras_predict(estimator, model_params, save, result_table,
feature_column_names, feature_metas, result_col_name,
datasource, select, hdfs_namenode_addr, hive_location,
hdfs_user, hdfs_pass):
classifier = estimator(**model_params)
classifier_pkg = sys.modules[estimator.__module__]
conn = connect_with_data_source(datasource)
def eval_input_fn(batch_size, cache=False):
feature_types = []
for name in feature_column_names:
# NOTE: vector columns like 23,21,3,2,0,0 should use shape None
if feature_metas[name]["is_sparse"]:
feature_types.append((tf.int64, tf.int32, tf.int64))
else:
feature_types.append(get_dtype(feature_metas[name]["dtype"]))
gen = db_generator(conn.driver, conn, select, feature_column_names,
None, feature_metas)
dataset = tf.data.Dataset.from_generator(gen, (tuple(feature_types), ))
ds_mapper = functools.partial(
parse_sparse_feature_predict,
feature_column_names=feature_column_names,
feature_metas=feature_metas)
dataset = dataset.map(ds_mapper).batch(batch_size)
if cache:
dataset = dataset.cache()
return dataset
# NOTE: always use batch_size=1 when predicting to get the pairs of features and predict results
# to insert into result table.
pred_dataset = eval_input_fn(1)
one_batch = next(iter(pred_dataset))
# NOTE: must run predict one batch to initialize parameters
# see: https://www.tensorflow.org/alpha/guide/keras/saving_and_serializing#saving_subclassed_models
classifier.predict_on_batch(one_batch)
classifier.load_weights(save)
pred_dataset = eval_input_fn(1, cache=True).make_one_shot_iterator()
buff_rows = []
column_names = feature_column_names[:]
column_names.append(result_col_name)
with buffered_db_writer(conn.driver, conn, result_table, column_names, 100,
hdfs_namenode_addr, hive_location, hdfs_user,
hdfs_pass) as w:
for features in pred_dataset:
result = classifier.predict_on_batch(features)
result = classifier_pkg.prepare_prediction_column(result[0])
row = []
for idx, name in enumerate(feature_column_names):
val = features[name].numpy()[0][0]
row.append(str(val))
if isinstance(result, np.ndarray) and len(result) > 1:
# NOTE(typhoonzero): if the output dimension > 1, format output tensor
# using a comma separated string. Only available for keras models.
row.append(",".join([str(i) for i in result]))
else:
row.append(str(result))
w.write(row)
del pred_dataset
def estimator_predict(estimator, model_params, save, result_table,
feature_column_names, feature_metas, result_col_name,
datasource, select, hdfs_namenode_addr, hive_location,
hdfs_user, hdfs_pass, is_pai, pai_table):
classifier = estimator(**model_params)
if not is_pai:
conn = connect_with_data_source(datasource)
def fast_input_fn(generator):
feature_types = []
for name in feature_column_names:
if feature_metas[name]["is_sparse"]:
feature_types.append((tf.int64, tf.int32, tf.int64))
else:
feature_types.append(get_dtype(feature_metas[name]["dtype"]))
def _inner_input_fn():
dataset = tf.data.Dataset.from_generator(generator,
(tuple(feature_types), ))
ds_mapper = functools.partial(
parse_sparse_feature_predict,
feature_column_names=feature_column_names,
feature_metas=feature_metas)
dataset = dataset.map(ds_mapper)
dataset = dataset.batch(1).cache()
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
return features
return _inner_input_fn
column_names = feature_column_names[:]
column_names.append(result_col_name)
fast_predictor = FastPredict(classifier, fast_input_fn)
if is_pai:
driver = "pai_maxcompute"
conn = None
pai_table_parts = pai_table.split(".")
formated_pai_table = "odps://%s/tables/%s" % (pai_table_parts[0],
pai_table_parts[1])
predict_generator = pai_maxcompute_db_generator(
formated_pai_table, feature_column_names, None, feature_metas)()
else:
driver = conn.driver
predict_generator = db_generator(conn.driver, conn, select,
feature_column_names, None,
feature_metas)()
with buffered_db_writer(driver, conn, result_table, column_names, 100,
hdfs_namenode_addr, hive_location, hdfs_user,
hdfs_pass) as w:
for features in predict_generator:
result = fast_predictor.predict(features)
row = []
for idx, _ in enumerate(feature_column_names):
val = features[0][idx][0]
row.append(str(val))
if "class_ids" in list(result)[0]:
row.append(str(list(result)[0]["class_ids"][0]))
else:
# regression predictions
row.append(str(list(result)[0]["predictions"][0]))
w.write(row)
def pred(datasource,
estimator,
select,
result_table,
feature_columns,
feature_column_names,
result_col_name,
feature_metas={},
model_params={},
save="",
batch_size=1,
hdfs_namenode_addr="",
hive_location="",
hdfs_user="",
hdfs_pass="",
is_pai=False,
pai_table=""):
if not is_pai:
conn = connect_with_data_source(datasource)
model_params.update(feature_columns)
is_estimator = issubclass(
estimator,
(tf.estimator.Estimator, tf.estimator.BoostedTreesClassifier,
tf.estimator.BoostedTreesRegressor))
if not is_estimator:
if not issubclass(estimator, tf.keras.Model):
# functional model need field_metas parameter
model_params["field_metas"] = feature_metas
keras_predict(estimator, model_params, save, result_table,
feature_column_names, feature_metas, result_col_name,
datasource, select, hdfs_namenode_addr, hive_location,
hdfs_user, hdfs_pass)
else:
if is_pai:
FLAGS = define_tf_flags()
model_params["model_dir"] = FLAGS.checkpointDir
else:
model_params['model_dir'] = save
estimator_predict(estimator, model_params, save, result_table,
feature_column_names, feature_metas, result_col_name,
datasource, select, hdfs_namenode_addr,
hive_location, hdfs_user, hdfs_pass, is_pai,
pai_table)
print("Done predicting. Predict table : %s" % result_table)
|
py | b4061424d1d94167d7fa9b8d1de95c11aba27116 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for OptimizerV2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import losses
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adadelta
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import nadam
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import util as trackable_utils
_DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64]
# TODO(b/141710709): complex support in NVCC and ROCM.
if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
_DATA_TYPES += [dtypes.complex64, dtypes.complex128]
class OptimizerTest(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testBasic(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAdaptiveLearningRate(self):
for dtype in _DATA_TYPES:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(1.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, [var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
# var0 = [1., 2.] - 1.0 * [5, 5]
self.assertAllClose([-4., -3.], self.evaluate(var0))
# var1 = [3., 4.] - 1.0 * [3, 3]
self.assertAllClose([0., 1.], self.evaluate(var1))
sgd.learning_rate = 0.5
if context.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
# Validate updated params
# var0 = [-4., -3.] - 0.5 * [5, 5]
self.assertAllClose([-6.5, -5.5], self.evaluate(var0))
# var1 = [0., 1.] - 0.5 * [3, 3]
self.assertAllClose([-1.5, -0.5], self.evaluate(var1))
sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5)
if context.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testPrecomputedGradient(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
grad_loss = constant_op.constant([42, -42], dtype=dtype)
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoGradients(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoGradientsForAnyVariables_Minimize(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: constant_op.constant(5.0)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoGradientsForAnyVariables_ApplyGradients(self):
for dtype in _DATA_TYPES:
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradientsAsVariables(self):
for i, dtype in enumerate(_DATA_TYPES):
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
variables.Variable(
array_ops.zeros([2], dtype), name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
# Run convert_ops to achieve the gradients converting
self.evaluate(variables.global_variables_initializer())
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd.apply_gradients(converted_grads_and_vars)
self.evaluate(variables.global_variables_initializer())
self.evaluate(convert_ops)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testComputeGradientsWithTensors(self):
with testing_utils.use_gpu():
x = ops.convert_to_tensor_v2_with_dispatch(1.0)
def f():
return x * x
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(f, [x])
self.assertLen(grads_and_vars, 1)
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd.apply_gradients(grads_and_vars)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with testing_utils.use_gpu():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
loss = lambda: 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0., 0.], self.evaluate(var1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testIterationWithoutMinimize(self):
with testing_utils.use_gpu():
sgd = gradient_descent.SGD(3.0)
self.evaluate(sgd.iterations.initializer)
self.assertEqual(0, self.evaluate(sgd.iterations))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testConfig(self):
with testing_utils.use_gpu():
opt = gradient_descent.SGD(learning_rate=1.0)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
lr = opt._get_hyper('learning_rate')
lr2 = opt2._get_hyper('learning_rate')
self.evaluate(variables.global_variables_initializer())
# assert both are equal float values.
self.assertEqual(self.evaluate(lr), self.evaluate(lr2))
var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
loss = lambda: 3 * var0
# learning rate variable created when calling minimize.
opt.minimize(loss, [var0])
opt3 = gradient_descent.SGD.from_config(config)
lr3 = opt3._get_hyper('learning_rate')
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(lr), self.evaluate(lr3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testConfigWithLearningRateDecay(self):
with testing_utils.use_gpu():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
for decay_schedule in [
learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.1),
learning_rate_schedule.PiecewiseConstantDecay(
[5], [1., .5])
]:
step = 10
opt = gradient_descent.SGD(decay_schedule)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
# assert both are equal float values.
self.assertAllEqual(
decay_schedule(step),
opt._get_hyper('learning_rate')(step))
self.assertAllEqual(
decay_schedule(step),
opt2._get_hyper('learning_rate')(step))
loss = lambda: 3 * var0
# learning rate variable is created when calling minimize.
opt.minimize(loss, [var0])
self.evaluate(variables.global_variables_initializer())
config = opt.get_config()
opt3 = gradient_descent.SGD.from_config(config)
self.assertAllEqual(
self.evaluate(opt._get_hyper('learning_rate')(step)),
opt3._get_hyper('learning_rate')(step))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradClipValue(self):
with testing_utils.use_gpu():
var = variables.Variable([1.0, 2.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0., 1.], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradClipNorm(self):
with testing_utils.use_gpu():
var = variables.Variable([1.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGradGlobalClipNorm(self):
with testing_utils.use_gpu():
# l2 norm is 5.0
var1 = variables.Variable([1.0])
var2 = variables.Variable([2.0])
loss = lambda: 3 * var1 + 4 * var2
opt = gradient_descent.SGD(learning_rate=1.0, global_clipnorm=2.0)
opt_op = opt.minimize(loss, [var1, var2])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# grad1 = 3.0 * 2.0 / 5.0 = 1.2
self.assertAllClose([-.2], self.evaluate(var1))
# grad2 = 4.0 * 2.0 / 5.0 = 1.6
self.assertAllClose([.4], self.evaluate(var2))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidClipNorm(self):
with self.assertRaisesRegex(ValueError, '>= 0'):
gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
clip_type=['clipnorm', 'global_clipnorm', 'clipvalue']))
def testConfigWithCliping(self, clip_type):
opt = gradient_descent.SGD(learning_rate=1.0, **{clip_type: 2.0})
config = opt.get_config()
opt = gradient_descent.SGD.from_config(config)
self.assertEqual(getattr(opt, clip_type), 2.0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidKwargs(self):
with self.assertRaisesRegex(TypeError, 'Unexpected keyword argument'):
gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testWeights(self):
with testing_utils.use_gpu():
opt1 = adam.Adam(learning_rate=1.0)
var1 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss1 = lambda: 3 * var1
opt_op_1 = opt1.minimize(loss1, [var1])
self.evaluate(variables.global_variables_initializer())
config = opt1.get_config()
opt2 = adam.Adam.from_config(config)
var2 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss2 = lambda: 3 * var2
opt_op_2 = opt2.minimize(loss2, [var2])
weights = opt1.get_weights()
# Assert set_weights and both variables get updated to same value.
self.evaluate(variables.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_1, opt_op_2])
self.assertAllClose(self.evaluate(var1), self.evaluate(var2))
self.assertEqual(1, self.evaluate(opt1.iterations))
self.assertEqual(1, self.evaluate(opt2.iterations))
var3 = variables.Variable([1.0, 2.0, 3.0], dtype=dtypes.float32)
var4 = variables.Variable([4.0, 5.0, 6.0], dtype=dtypes.float32)
loss3 = lambda: 3 * var3 + 5 * var4
opt_op_3 = opt1.minimize(loss3, [var3, var4])
      # Assert that set_weights raises a ValueError since the weight list does not match.
self.evaluate(variables.global_variables_initializer())
weights = opt1.get_weights()
with self.assertRaisesRegex(ValueError, 'but the optimizer was'):
opt2.set_weights(weights)
# Assert set_weights and variables get updated to same value.
var5 = variables.Variable([1.0, 2.0, 3.0], dtype=dtypes.float32)
var6 = variables.Variable([4.0, 5.0, 6.0], dtype=dtypes.float32)
loss4 = lambda: 3 * var5 + 5 * var6
opt_op_4 = opt2.minimize(loss4, [var5, var6])
self.evaluate(variables.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_3, opt_op_4])
self.assertAllClose(
self.evaluate([var3, var4]), self.evaluate([var5, var6]))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGettingHyperParameters(self):
with self.test_session():
opt = adam.Adam(learning_rate=1.0)
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
lr = self.evaluate(opt.lr)
self.assertEqual(1.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(3.0))
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
with self.assertRaises(AttributeError):
opt.not_an_attr += 3
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testGettingHyperParametersWithLrInConstructor(self):
with self.test_session():
opt = gradient_descent.SGD(lr=3.0)
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt.learning_rate, variables.Variable)
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(4.0))
lr = self.evaluate(opt.lr)
self.assertEqual(4.0, lr)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testOptimizerWithKerasModel(self):
a = input_layer.Input(shape=(3,), name='input_a')
b = input_layer.Input(shape=(3,), name='input_b')
dense = core.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = core.Dropout(0.5, name='dropout')(c)
model = training.Model([a, b], [d, e])
optimizer = gradient_descent.SGD(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testOptimizerWithCallbacks(self):
np.random.seed(1331)
input_np = np.random.random((10, 3))
output_np = np.random.random((10, 4))
a = input_layer.Input(shape=(3,), name='input_a')
model = sequential.Sequential()
model.add(core.Dense(4, kernel_initializer='zeros', name='dense'))
model.add(core.Dropout(0.5, name='dropout'))
model(a)
optimizer = gradient_descent.SGD(learning_rate=0.1)
model.compile(optimizer, loss='mse', metrics=['mae'])
# This does not reduce the LR after the first epoch (due to low delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
# This should reduce the LR after the first epoch (due to high delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def testOptimizerSetIterations(self):
global_step = training_util.get_or_create_global_step()
opt = adam.Adam(learning_rate=1.0)
opt.iterations = global_step
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
self.evaluate(variables.global_variables_initializer())
init_step_value = self.evaluate(global_step)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
new_step_value = self.evaluate(global_step)
self.assertEqual(new_step_value, init_step_value + 1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testOptimizerWithCallableVarList(self):
train_samples = 20
input_dim = 1
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 1
model = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes)
opt = adam.Adam()
loss = lambda: losses.mean_squared_error(model(x), y)
var_list = lambda: model.trainable_weights
with self.assertRaisesRegex(
ValueError, 'Weights for model .* have not yet been created'):
var_list()
train_op = opt.minimize(loss, var_list)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
[[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
self.evaluate(train_op)
self.assertNotEqual(
[[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
self.assertLen(var_list(), 4)
def testVarKey(self):
with ops.get_default_graph().as_default():
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
self.assertTrue(a._in_graph_mode)
self.assertTrue(b._in_graph_mode)
var_key = optimizer_v2._var_key(a)
self.assertEqual('var', var_key)
var_key = optimizer_v2._var_key(b)
self.assertEqual('var_1', var_key)
def testVarName(self):
with ops.get_default_graph().as_default():
var = variables.Variable([1., 2.], name='var')
loss = var + 1.
opt = adam.Adam()
opt.get_updates(loss, [var])
opt_vars = opt.variables()
self.assertLen(opt_vars, 3)
self.assertEqual('Adam/iter:0', opt_vars[0].name)
self.assertEqual('Adam/var/m:0', opt_vars[1].name)
var_2 = variables.Variable([1., 2.], name='var_2')
loss = var_2 + 1.
      with backend.name_scope('outer'):
opt.get_updates(loss, [var_2])
opt_vars = opt.variables()
self.assertLen(opt_vars, 5)
      self.assertEqual('outer/Adam/var_2/m:0', opt_vars[3].name)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testEmptyVarList(self):
opt = gradient_descent.SGD(1.)
opt.minimize(lambda: constant_op.constant(1.), [])
opt.apply_gradients([])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAggregationTrue(self):
# Test that experimental_aggregate_gradients=True works without distributed
# strategy.
var = variables.Variable([1., 2.])
opt = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1., 2.], self.evaluate(var))
opt_op = opt.apply_gradients([([0.1, 0.1], var)],
experimental_aggregate_gradients=True)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.7, 1.7], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testAggregationFalse(self):
# Test that experimental_aggregate_gradients=False works without distributed
# strategy.
var = variables.Variable([1., 2.])
opt = gradient_descent.SGD(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1., 2.], self.evaluate(var))
opt_op = opt.apply_gradients([([0.1, 0.1], var)],
experimental_aggregate_gradients=False)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.7, 1.7], self.evaluate(var))
@combinations.generate(combinations.combine(mode=['eager']))
def testRestoringIterationsWithoutAnOptimizer(self):
opt = gradient_descent.SGD(3.0)
opt.iterations.assign(5)
checkpoint = trackable_utils.Checkpoint(optimizer=opt)
path = checkpoint.save(self.get_temp_dir())
    # The following verifies that `iterations` can be restored in the absence
    # of an `Optimizer` object (using a `Checkpoint` as a placeholder).
iterations_var = variables.Variable(0, dtype=dtypes.int64)
optimizer_checkpoint = trackable_utils.Checkpoint(iter=iterations_var)
checkpoint_to_restore = trackable_utils.Checkpoint(
optimizer=optimizer_checkpoint)
checkpoint_to_restore.restore(path)
self.assertEqual(5, self.evaluate(iterations_var))
@keras_parameterized.run_all_keras_modes
class OptimizersCompatibilityTest(keras_parameterized.TestCase):
def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
if context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in eager mode')
np.random.seed(1331)
with testing_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 5
model_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_v1.compile(
opt_v1,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_v1.fit(x, y, batch_size=5, epochs=1)
model_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_v2.set_weights(model_v1.get_weights())
model_v2.compile(
opt_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
if not ops.executing_eagerly_outside_functions():
model_v2._make_train_function()
if test_weights:
opt_v2.set_weights(opt_v1.get_weights())
hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
rtol=1e-5, atol=1e-5)
self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
rtol=1e-5, atol=1e-5)
def testAdadeltaCompatibility(self):
opt_v1 = optimizers.Adadelta(lr=0.01)
opt_v2 = adadelta.Adadelta(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdagradCompatibility(self):
opt_v1 = optimizers.Adagrad(lr=0.01)
opt_v2 = adagrad.Adagrad(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamCompatibility(self):
opt_v1 = optimizers.Adam()
opt_v2 = adam.Adam()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamaxCompatibility(self):
opt_v1 = optimizers.Adamax(lr=0.01)
opt_v2 = adamax.Adamax(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testNadamCompatibility(self):
opt_v1 = optimizers.Nadam(lr=0.001)
opt_v2 = nadam.Nadam(learning_rate=0.001)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testMomentumCompatibility(self):
opt_v1 = optimizers.SGD(lr=0.01, momentum=0.9)
opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testRMSpropCompatibility(self):
opt_v1 = optimizers.RMSprop()
opt_v2 = rmsprop.RMSprop()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testSGDCompatibility(self):
opt_v1 = optimizers.SGD(lr=0.01)
opt_v2 = gradient_descent.SGD(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2, False)
def testNumericEquivalenceForNesterovMomentum(self):
if context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in eager mode')
np.random.seed(1331)
with testing_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 5
model_k_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2.set_weights(model_k_v1.get_weights())
model_tf = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_tf.set_weights(model_k_v2.get_weights())
opt_k_v1 = optimizers.SGD(momentum=0.9, nesterov=True)
opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
opt_tf = momentum.MomentumOptimizer(
learning_rate=0.01, momentum=0.9, use_nesterov=True)
model_k_v1.compile(
opt_k_v1,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_k_v2.compile(
opt_k_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_tf.compile(
opt_tf,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
def testNumericEquivalenceForAmsgrad(self):
if context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in eager mode')
np.random.seed(1331)
with testing_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
y = np_utils.to_categorical(y)
num_hidden = 5
model_k_v1 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2 = testing_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
model_k_v2.set_weights(model_k_v1.get_weights())
opt_k_v1 = optimizers.Adam(amsgrad=True)
opt_k_v2 = adam.Adam(amsgrad=True)
model_k_v1.compile(
opt_k_v1,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
model_k_v2.compile(
opt_k_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly())
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
# Note: These tests are kept in a separate class to avoid bugs in some
# distributions of Python that break AutoGraph which is used by tf.function.
@combinations.generate(combinations.combine(mode=['eager']))
class OptimizerWithFunctionTest(test.TestCase, parameterized.TestCase):
def testBasic(self):
var = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
loss = lambda: 3 * var
opt = adam.Adam(learning_rate=1.0)
@def_function.function
def fn():
opt.minimize(loss, [var])
return var
self.assertAllClose([0., 1.], fn(), atol=1e-4)
self.assertAllClose([-1, 0.], fn(), atol=1e-4)
def testVarKeyWithVarCreatedInEager(self):
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
@test_util.also_run_as_tf_function
def var_key_test():
self.assertFalse(a._in_graph_mode)
self.assertFalse(b._in_graph_mode)
var_key_a = optimizer_v2._var_key(a)
self.assertStartsWith(var_key_a, 'var_')
var_key_b = optimizer_v2._var_key(b)
self.assertStartsWith(var_key_b, 'var_')
self.assertNotEqual(var_key_a, var_key_b)
var_key_test()
def testLearningRateDecayUsedInTwoFunctions(self):
a = variables.Variable([1., 2.], name='var')
b = variables.Variable([1.], name='var')
learning_rate_decay = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5)
opt = adam.Adam(learning_rate=learning_rate_decay)
loss_a = lambda: 3 * a
loss_b = lambda: 2 * b
@def_function.function
def fn_a():
opt.minimize(loss_a, [a])
return a
@def_function.function
def fn_b():
opt.minimize(loss_b, [b])
return b
fn_a()
fn_b()
_NUM_LEARNERS = 50
APPLY_SCOPE = 'debug_apply'
ALLOWLIST = [
# optimizer_v2._deduplicate_indexed_slices contains an indexed slice:
# array_ops.shape(unique_indices)[0]
# which winds up expanding to [0:1:1] thereby creating three constants
# to represent the indices.
('embeddings/strided_slice/stack', 'Const'),
]
def get_inputs(op):
op_inputs = list(op.inputs) + op.control_inputs
names = [i.name for i in op_inputs]
op_inputs = [getattr(i, 'op', i) for i in op_inputs]
return op_inputs, names
def strip_name(node):
if 'Placeholder' in node.op:
return
node.name = ''
def topological_sort(graph):
graph_ops = graph.get_operations()
sources = []
result = []
inputs = {}
outputs = collections.defaultdict(set)
for op in graph_ops:
op_inputs = get_inputs(op)[0]
if not op_inputs:
sources.append(op)
inputs[op] = set(op_inputs)
for i in op_inputs:
outputs[i].add(op)
while sources:
op = sources.pop()
for op_output in outputs[op]:
inputs[op_output].remove(op)
if not inputs[op_output]:
sources.append(op_output)
result.append(op)
# Check correctness.
if len(result) != len(graph_ops):
raise ValueError('Sort result has {} ops, source graph has {}.'
.format(len(result), len(graph_ops)))
sort_check_seen = set()
for op in result:
sort_check_seen.add(op)
for i in get_inputs(op)[0]:
assert i in sort_check_seen
return result
def identify_redundant_ops(graph):
"""Implements basic common subexpression elimination.
This is not intended to replicate the graph semantics of TensorFlow Graphs
(for instance it does not handle stateful op ordering), nor is it intended to
replace the common subexpression elimination Grappler pass. Rather, it
provides a high level sanity check that clearly redundant ops are not being
created.
Args:
graph: The graph to be analyzed.
Returns:
A count of the duplicate ops and a description of the structure of each.
"""
sorted_ops = topological_sort(graph)
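  # Added note: each op's NodeDef is canonicalized below (name stripped, inputs rewritten to
  # canonical producer names) and its string repr is used as a dedup key; ops inside the apply
  # scope that share a key are reported as duplicates, unless allowlisted.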
duplicates = collections.defaultdict(list)
unified_node_defs = {}
name_map = {}
for op in sorted_ops:
input_names = []
for op_input, name in zip(*get_inputs(op)):
input_def = op_input.node_def
# Operations can have multiple outputs. We track which is used to prevent
# overzealous elimination.
input_def.name = name
input_def.input[:] = [name_map.get(i, i) for i in input_def.input]
strip_name(input_def)
# NodeDef.SerializeToString() does not provide identical serialized
# representations for identical NodeDefs, so we instead use string
# representation as a dict key.
key = repr(input_def)
if key in unified_node_defs:
input_names.append(unified_node_defs[key])
else:
unified_node_defs[key] = op_input.name
input_names.append(name)
node_def = op.node_def
node_def.input[:] = input_names
strip_name(node_def)
key = repr(node_def)
duplicates[key].append(op)
name_map[op.name] = duplicates[key][0].name
num_duplicates = 0
duplicate_types = []
for standard_def, op_defs in duplicates.items():
# We are only interested in testing the apply method of the optimizer
op_defs = [i for i in op_defs if APPLY_SCOPE in i.name]
# We only check for per-apply redundant ops.
if len(op_defs) < _NUM_LEARNERS:
continue
# Certain ops are simply not worth eliminating, and are instead simply
# ignored.
name, op_type = op_defs[0].name, op_defs[0].type
if any(allowlisted_scope in name and op_type == allowlisted_type
for allowlisted_scope, allowlisted_type in ALLOWLIST):
continue
num_duplicates += len(op_defs)
traceback = []
for level in op_defs[0].traceback:
traceback.append(' {} {}:{}'.format(level[0], level[2], level[1]))
duplicate_types.append(
'# Example name: {}\n# Op creation stack:\n{}\n{}'.format(
op_defs[0].name,
'\n'.join(traceback),
standard_def))
return num_duplicates, duplicate_types
def make_model():
r"""Constructs a simple ensemble of weak learners model.
--------- --------- --------- ---------
| Input | | Input | ... | Input | | Input |
--------- --------- --------- ---------
| | | |
V V V V
--------- --------- --------- ---------
| Embed | | Embed | ... | Embed | | Embed |
--------- --------- --------- ---------
| | | |
V V V V
--------- --------- --------- ---------
| Dense | | Dense | ... | Dense | | Dense |
--------- --------- --------- ---------
\ | | /
\ | | /
---------------------------------------------
|
---------
| Dense |
---------
This topology is chosen because it exercises both dense and sparse update
paths.
Returns:
A model for testing optimizer coefficient reuse.
"""
inputs = []
intermediates = []
for _ in range(_NUM_LEARNERS):
inp = keras.layers.Input(shape=(1,), dtype=dtypes.int32)
layer = keras.layers.Embedding(1, 4)(inp)
layer = keras.layers.Dense(1)(layer)
inputs.append(inp)
intermediates.append(layer)
layer = keras.layers.Concatenate(axis=-1)(intermediates)
layer = keras.layers.Dense(1)(layer)
return keras.models.Model(inputs, layer)
COEFFICIENT_PARAMS = (
('Adadelta', adadelta.Adadelta, None),
('Adagrad', adagrad.Adagrad, None),
('Adam', adam.Adam, None),
    ('Adam_amsgrad', adam.Adam, dict(amsgrad=True)),
('Adamax', adamax.Adamax, None),
('Ftrl', ftrl.Ftrl, None),
('Ftrl_l2_shrinkage', ftrl.Ftrl,
dict(l2_shrinkage_regularization_strength=0.1)),
('SGD', gradient_descent.SGD, None),
('SGD_momentum', gradient_descent.SGD, dict(momentum=0.5)),
('Nadam', nadam.Nadam, None),
('RMSprop', rmsprop.RMSprop, None),
('RMSprop_centered', rmsprop.RMSprop, dict(centered=True)),
('RMSprop_momentum', rmsprop.RMSprop, dict(momentum=0.5)),
('RMSprop_momentum_centered', rmsprop.RMSprop,
dict(momentum=0.5, centered=True)),
)
class OptimizerCoefficientTest(keras_parameterized.TestCase):
@parameterized.named_parameters(*COEFFICIENT_PARAMS)
def test_duplicate_ops(self, optimizer_class, init_kwargs=None):
init_kwargs = init_kwargs or {}
optimizer = optimizer_class(**init_kwargs)
graph = ops.Graph()
with graph.as_default():
model = make_model()
trainable_variables = model.trainable_variables
grads = optimizer.get_gradients(model.outputs[0], trainable_variables)
with backend.name_scope(APPLY_SCOPE):
optimizer.apply_gradients(zip(grads, trainable_variables))
num_duplicates, duplicate_types = identify_redundant_ops(graph)
if num_duplicates:
# Avoid spamming logs.
if len(duplicate_types) > 3:
duplicate_types = duplicate_types[:3] + ['...']
num_total = len(graph.get_operations())
raise ValueError('{} of {} ({:.1f}%) ops were duplicates:\n\n{}'.format(
num_duplicates, num_total, num_duplicates / num_total * 100,
'\n'.join(duplicate_types)))
@parameterized.named_parameters(*COEFFICIENT_PARAMS)
def test_subclass_compat(self, optimizer_class, init_kwargs=None):
"""Ensure that subclassed optimizers without apply_state still work."""
class SubclassedOptimizer(optimizer_class):
def _resource_apply_dense(self, grad, var): # pylint: disable=useless-super-delegation
return super(SubclassedOptimizer, self)._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices): # pylint: disable=useless-super-delegation
return super(SubclassedOptimizer, self)._resource_apply_sparse(
grad, var, indices)
init_kwargs = init_kwargs or {}
optimizer = SubclassedOptimizer(**init_kwargs)
graph = ops.Graph()
with graph.as_default():
model = make_model()
trainable_variables = model.trainable_variables
grads = optimizer.get_gradients(model.outputs[0], trainable_variables)
with backend.name_scope(APPLY_SCOPE):
optimizer.apply_gradients(zip(grads, trainable_variables))
if __name__ == '__main__':
test.main()
|
py | b406157f6f3e7dfb27c7e613e6ec761cbda4c3df | # -*- coding: utf-8 -*-
"""Black Friday Dataset-Linear Regression Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1IJdM0lsxz7fljcJCwSWB-lYrSntXv0YV
- In this notebook we predict how much customers spend during Black Friday, using features such as age, gender, and marital status.
- The dataset is the Black Friday dataset from Kaggle, which contains about 550,068 rows and 12 features and can be downloaded from the competition page.
- The dataset contains the labels we have to predict, and the labels are continuous, so this is a supervised regression problem.
## Import libraries
"""
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
"""## Importing dataset"""
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
"""## Descriptive analysis"""
# Preview training dataset
train.head()
# Preview testing dataset
test.head()
# Training dataset dimensions - (rows, columns)
print('Training data: \nRows: {} Columns: {}'.format(train.shape[0], train.shape[1]))
train.shape
test.shape
# Features data-type
train.info()
train.describe()
# Statistical summary
train.describe().T
train.apply(lambda x: len(x.unique()))
"""## Checking for Null values"""
train.isnull()
train.isnull().sum()
# Checking for Null values
round((train.isnull().sum() / train.shape[0]) * 100, 2).astype(str) + ' %'
"""## Age :"""
train['Age'].value_counts()
# Checking the % counts of unique values
round((train['Age'].value_counts(normalize = True).mul(100)), 2).astype(str) + ' %'
"""## Stay_In_Current_City_Years :"""
train['Stay_In_Current_City_Years'].value_counts()
# Checking the % counts of unique values
round((train['Stay_In_Current_City_Years'].value_counts(normalize = True).mul(100)), 2).astype(str) + ' %'
train['Product_Category_1'].value_counts()
round((train['Product_Category_1'].value_counts(normalize = True).mul(100)), 2).astype(str) + ' %'
round((train['Product_Category_2'].value_counts(normalize = True).mul(100)), 2).astype(str) + ' %'
round((train['Product_Category_3'].value_counts(normalize = True).mul(100)), 2).astype(str) + ' %'
"""## Observations :
- The feature 'Product_Category_2' contains 31.57% null values, which could be imputed, whereas 'Product_Category_3' contains 69.67% null values, so we drop that feature.
- The features 'Age' and 'Stay_In_Current_City_Years' contain values with a trailing '+' that needs to be replaced.
## Exploratory Data Analysis:
### Univariate Analysis:
#### 'Purchase':
"""
plt.style.use('ggplot')
plt.figure(figsize = (20, 7))
sns.distplot(train['Purchase'], bins = 20, color='GREEN')
plt.title('Distribution of Purchase Amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 30, 'fontweight' : 'bold'})
plt.xlabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20})
plt.ylabel('Number of people', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20})
plt.tick_params(labelsize = 15)
plt.show()
"""### Creating a barplot for 'Gender'"""
gen = train['Gender'].value_counts()
plt.figure(figsize = (6, 6))
plt.style.use('seaborn-whitegrid')
sns.barplot(gen.index, gen.values, palette = 'bright')
plt.title('Distribution of Gender', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20, 'fontweight' : 'bold'})
plt.xlabel('Gender', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.ylabel('Number of people', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.tick_params(labelsize = 10)
plt.show()
"""### Creating a pie chart for 'City Category'"""
city = train['City_Category'].value_counts()
plt.style.use('seaborn')
plt.figure(figsize = (10, 7))
plt.pie(city.values, labels = city.index, startangle = 30, explode = (0 , 0.20, 0), shadow = True, autopct = '%1.1f%%')
plt.title('City category distribution', fontdict = {'fontname' : 'Monospace', 'fontsize' : 30, 'fontweight' : 'bold'})
plt.legend()
plt.legend(prop = {'size' : 20})
plt.axis('equal')
plt.show()
"""### Creating a donut chart for 'Age'"""
age = train['Age'].value_counts()
plt.style.use('bmh')
plt.figure(figsize = (10, 5))
plt.pie(age.values, labels = age.index, startangle = 50, autopct = '%1.1f%%')
centre_circle = plt.Circle((0, 0), 0.7, fc = 'white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.title('Age distribution', fontdict = {'fontname' : 'Monospace', 'fontsize' : 30, 'fontweight' : 'bold'})
plt.axis('equal')
plt.legend(prop = {'size' : 20})
plt.show()
"""### Creating a barplot for 'Occupation'"""
occupation = train['Occupation'].value_counts()
plt.figure(figsize = (20, 6))
plt.style.use('seaborn-whitegrid')
sns.barplot(occupation.index, occupation.values, palette = 'Paired')
plt.title('Occupation wise Distribution', fontdict = {'fontname' : 'Monospace', 'fontsize' : 30, 'fontweight' : 'bold'})
plt.xlabel('Occupation code', fontdict = {'fontname' : 'Monospace', 'fontsize' : 25})
plt.ylabel('Number of people', fontdict = {'fontname' : 'Monospace', 'fontsize' : 25})
plt.tick_params(labelsize = 20)
plt.show()
"""### Creating a countplot for 'Marital Status'"""
plt.style.use('seaborn')
plt.figure(figsize = (7, 6))
sns.countplot(train['Marital_Status'])
plt.title('Marital Status Distribution', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20, 'fontweight' : 'bold'})
plt.xlabel('Marital Status', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.ylabel('Number of people', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.tick_params(labelsize = 10)
plt.show()
"""### Creating a Treemap for 'Stay_In_Current_City_Years'"""
stay = train['Stay_In_Current_City_Years'].value_counts()
import squarify
plt.style.use('default')
plt.figure(figsize = (7, 5))
squarify.plot(sizes = stay.values, label = stay.index, value = stay.values)
plt.title('Stay in current city distribution', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20, 'fontweight' : 'bold'})
plt.show()
"""### Bivariate Analysis:
### Creating a barplot of 'Occupation vs Purchase'
"""
plt.figure(figsize = (10, 5))
plt.style.use('seaborn')
sns.barplot(train['Occupation'], train['Purchase'], palette = 'Paired')
plt.title('Purchase amount across Occupation', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20, 'fontweight' : 'bold'})
plt.xlabel('Occupation code', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.ylabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.tick_params(labelsize = 15)
plt.show()
"""### Creating a boxplot of 'Age vs Purchase'"""
plt.style.use('bmh')
sns.boxplot(train['Age'], train['Purchase'], palette = 'colorblind')
plt.title('Purchase amount across Age', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20, 'fontweight' : 'bold'})
plt.xlabel('Age', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.ylabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.tick_params(labelsize = 10)
plt.grid()
plt.show()
"""### Creating a barplot of 'Gender vs Purchase'"""
plt.style.use('default')
sns.barplot(train['Gender'], train['Purchase'])
plt.title('Purchase amount across Gender', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15, 'fontweight' : 'bold'})
plt.xlabel('Gender', fontdict = {'fontname' : 'Monospace', 'fontsize' : 10})
plt.ylabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 10})
plt.tick_params(labelsize = 7)
plt.show()
"""### Creating a barplot of 'City_Category vs Purchase'"""
plt.style.use('bmh')
sns.barplot(train['City_Category'], train['Purchase'])
plt.title('Purchase amount across City Category', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15, 'fontweight' : 'bold'})
plt.xlabel('City Category', fontdict = {'fontname' : 'Monospace', 'fontsize' : 10})
plt.ylabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 10})
plt.tick_params(labelsize = 7)
plt.show()
"""### Creating a barplot of 'Product_Category_1 vs Purchase'"""
plt.style.use('default')
plt.figure(figsize = (20, 7))
sns.barplot(train['Product_Category_1'], train['Purchase'], palette = 'colorblind')
plt.title('Purchase amount across Product Category', fontdict = {'fontname' : 'Monospace', 'fontsize' : 20, 'fontweight' : 'bold'})
plt.xlabel('Product Category_1', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.ylabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15})
plt.tick_params(labelsize = 20)
plt.grid()
plt.show()
"""### Creating a barplot of 'Marital Status vs Purchase'"""
plt.style.use('ggplot')
plt.figure(figsize = (6, 3))
sns.barplot(train['Marital_Status'], train['Purchase'])
plt.title('Purchase amount across Marital Status', fontdict = {'fontname' : 'Monospace', 'fontsize' : 15, 'fontweight' : 'bold'})
plt.xlabel('Marital Status', fontdict = {'fontname' : 'Monospace', 'fontsize' : 10})
plt.ylabel('Purchase amount', fontdict = {'fontname' : 'Monospace', 'fontsize' : 10})
plt.tick_params(labelsize = 7)
plt.show()
"""### Multivariate Analysis:"""
# Creating a Pairplot for all features
plt.style.use('default')
sns.pairplot(train)
plt.show()
# Creating a heatmap of correlation matrix
sns.heatmap(train.corr(), annot = True)
plt.show()
- An interesting observation from the gender distribution plot is that fewer women than men shopped during Black Friday.
- From the correlation heatmap, we can observe that the dependent feature 'Purchase' is highly correlated with 'Product_Category_1' and 'Product_Category_2'.
## Data Preprocessing:
### Merging of train and test
"""
train['source'] = 'train'
test['source'] = 'test'
dataset = pd.concat([train, test])
# Creating a heatmap of correlation matrix
sns.heatmap(dataset.corr(), annot = True)
plt.show()
"""### Replacing '+' in 'Age' and 'Stay_In_Current_City_Years'"""
dataset['Age'] = dataset['Age'].apply(lambda x : str(x).replace('55+', '55'))
dataset['Stay_In_Current_City_Years'] = dataset['Stay_In_Current_City_Years'].apply(lambda x : str(x).replace('4+', '4'))
"""### Dropping irrelevant features:"""
dataset.drop('Product_Category_3', axis = 1, inplace = True)
dataset.drop('Product_Category_2',axis=1,inplace=True)
dataset.drop('User_ID', axis = 1, inplace = True)
dataset.drop('Product_ID', axis = 1, inplace = True)
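# Hedged alternative (not executed): as noted in the Observations cell, 'Product_Category_2'
# could be imputed with its modal value instead of being dropped above, e.g.:
# dataset['Product_Category_2'] = dataset['Product_Category_2'].fillna(dataset['Product_Category_2'].mode()[0])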
dataset
dataset["Age"].unique()
dataset.shape
"""### Feature Encoding:"""
from sklearn.preprocessing import LabelEncoder
label_encoder_gender = LabelEncoder()
dataset['Gender'] = label_encoder_gender.fit_transform(dataset['Gender'])
dataset['Gender']
label_encoder_age = LabelEncoder()
dataset['Age'] = label_encoder_age.fit_transform(dataset['Age'])
label_encoder_city = LabelEncoder()
dataset['City_Category'] = label_encoder_city.fit_transform(dataset['City_Category'])
"""### Converting 'Stay_In_Current_City_Years' into numeric data type"""
dataset['Stay_In_Current_City_Years'] = dataset['Stay_In_Current_City_Years'].astype('int')
"""### Separating Dataset into train and test:"""
train = dataset.loc[dataset['source'] == 'train']
test = dataset.loc[dataset['source'] == 'test']
train.drop('source', axis = 1, inplace = True)
test.drop('source', axis = 1, inplace = True)
"""### Separating train into X and Y"""
X = train.drop("Purchase", axis = 1)
Y = train["Purchase"]
"""### Creating a train test split"""
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state = 123)
print("X_train shape:", X_train.shape)
print("X_test shape:", X_test.shape)
print("Y_train shape:", Y_train.shape)
print("Y_test shape:", Y_test.shape)
X_train.head()
"""## ML Model Implementation:
### Linear Regression:
"""
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train, Y_train)
Y_pred_lin_reg = lin_reg.predict(X_test)
len(Y_pred_lin_reg)
Y_pred_lin_reg
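# A hedged evaluation sketch for the predictions above (not part of the original notebook):
from sklearn.metrics import mean_squared_error, r2_score
print("RMSE:", np.sqrt(mean_squared_error(Y_test, Y_pred_lin_reg)))
print("R2 score:", r2_score(Y_test, Y_pred_lin_reg))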
X_test.head()
features=["0","3","12","2","1","0","11"]
int_features = [int(x) for x in features]
final_features = [np.array(int_features)]
Y_predicted = lin_reg.predict(final_features)
print(Y_predicted)
import pickle
pickle.dump(lin_reg,open(r"C:\Users\DELL\OneDrive\Desktop\Desktop Files\Practice\Projects\Black Friday Datset\LinearModel.pkl","wb"))
LinearModel=pickle.load(open(r"C:\Users\DELL\OneDrive\Desktop\Desktop Files\Practice\Projects\Black Friday Datset\LinearModel.pkl","rb"))
Y_predicted1=LinearModel.predict(final_features)
Y_predicted1
|
py | b40615b7ebce782dd5f86735c34644c60f50220b | import numpy as np
from torch import nn
from torch.nn import functional as F
from torchvision.models.inception import inception_v3
from scipy.stats import entropy
class InceptionScore(nn.Module):
def __init__(self, device, batch_size=32, resize=False):
super(InceptionScore, self).__init__()
assert batch_size > 0
self.resize = resize
self.batch_size = batch_size
self.device = device
# Load inception model
self.inception_model = inception_v3(pretrained=True, transform_input=False).to(self.device)
self.inception_model.eval()
self.clean()
def clean(self):
self.preds = np.zeros((0, 1000))
def get_pred(self, x):
if self.resize:
# x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
x = F.interpolate(x, size=(299, 299), mode='bilinear')
x = self.inception_model(x)
return F.softmax(x, dim=1).data.cpu().numpy()
def forward(self, imgs):
# Get predictions
preds_imgs = self.get_pred(imgs.to(self.device))
self.preds = np.append(self.preds, preds_imgs, axis=0)
def compute_score(self, splits=1):
# Now compute the mean kl-div
split_scores = []
preds = self.preds
N = self.preds.shape[0]
for k in range(splits):
part = preds[k * (N // splits): (k + 1) * (N // splits), :]
py = np.mean(part, axis=0)
scores = []
for i in range(part.shape[0]):
pyx = part[i, :]
scores.append(entropy(pyx, py))
split_scores.append(np.exp(np.mean(scores)))
return np.mean(split_scores), np.std(split_scores)
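# A hedged usage sketch (assumes a DataLoader `loader` yielding image batches already
# normalized for torchvision's Inception v3; the names below are illustrative, not from this file):
# import torch
# scorer = InceptionScore(device=torch.device("cuda"), resize=True)
# for imgs, _ in loader:
#     scorer(imgs)                      # accumulate softmax predictions via forward()
# mean_is, std_is = scorer.compute_score(splits=10)
# scorer.clean()                        # reset accumulated predictions for the next run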
|
py | b40615ed90552789cef50c77188648c8c91b485f | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from models import (Team, Event, Group, Competition, LoginForm, Match,
TeamResult, MatchSaveForm)
from django.test.client import Client
# class SimpleTest(TestCase):
# def test_basic_addition(self):
# """
# Tests that 1 + 1 always equals 2.
# """
# self.assertEqual(1 + 1, 2)
# class ModelTest(TestCase):
# def test_models(self):
# self.teamA = Team.objects.create(name="testTeamA")
# self.teamB = Team.objects.create(name="testTeamB")
# self.resultA = TeamResult.objects.create(team=self.teamA)
# self.resultB = TeamResult.objects.create(team=self.teamB)
# #self.match = Match.objects.create(teamA=self.teamA, teamB=self.teamB, referee=)
# #self.group = Group.objects.create(name="testGroup", teams=[self.teamA, self.teamB, matches=[], results=[self.resultA, self.resultB])
# #self.competition = Competition.objects.create(name="testCompetition", groups=[self.group])
# #self.event = Event.objects.create(name="testEvent", competitions=[self.competition])
# self.assertEqual(self.teamA.name, "testTeamA")
class LoginTest(TestCase):
def setUp(self):
self.client = Client()
def test_basic_login_page(self):
response = self.client.get('/login/')
# check if page is OK
self.assertEqual(response.status_code, 200)
# check if we have a form
self.assertTrue('form' in response.context)
self.assertTrue(isinstance(response.context['form'], LoginForm))
# check if we get to index (/) after logging in
self.assertTrue('next' in response.context)
self.assertEqual(response.context['next'], '/')
def test_login_page_with_forward(self):
response = self.client.get('/login/?next=/events')
# check if page is OK
self.assertEqual(response.status_code, 200)
# check if we have a form
self.assertTrue('form' in response.context)
self.assertTrue(isinstance(response.context['form'], LoginForm))
# check if we get to events (/events) after logging in
self.assertTrue('next' in response.context)
self.assertEqual(response.context['next'], '/events')
|
py | b40615fe72ecd763c96d10ba4cf9e88e0145540b | """Huawei LTE sensor tests."""
import pytest
from openpeerpower.components.huawei_lte import sensor
from openpeerpower.const import (
SIGNAL_STRENGTH_DECIBELS,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
@pytest.mark.parametrize(
("value", "expected"),
(
("-71 dBm", (-71, SIGNAL_STRENGTH_DECIBELS_MILLIWATT)),
("15dB", (15, SIGNAL_STRENGTH_DECIBELS)),
(">=-51dBm", (-51, SIGNAL_STRENGTH_DECIBELS_MILLIWATT)),
),
)
def test_format_default(value, expected):
"""Test that default formatter copes with expected values."""
assert sensor.format_default(value) == expected
|
py | b40616757d2eaef423b482cffa2e7c6429b69cca | # swift_build_support/products/product.py -----------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
class Product(object):
@classmethod
def product_name(cls):
"""product_name() -> str
The identifier-style name to use for this product.
"""
return cls.__name__.lower()
@classmethod
def get_build_directory_name(cls, host_target):
return "{}-{}".format(cls.product_name(),
host_target.name)
def __init__(self, args, toolchain, source_dir, build_dir):
self.args = args
self.toolchain = toolchain
self.source_dir = source_dir
self.build_dir = build_dir
self.cmake_options = []
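# A hedged usage sketch (hypothetical subclass and host object; not part of the original file):
# class Ninja(Product):
#     pass
# Ninja.product_name()                      # -> 'ninja'
# Ninja.get_build_directory_name(host)      # -> 'ninja-<host.name>' for a host_target `host`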
|
py | b406173271a993bb7532d7805170ce8dea159574 |
import curses
import logging.config
VERSION = 1.0
MOTD01 = "-----------------------------------------------------------\n"
MOTD01 += " ____ _ _ ____ ____ _____ ____ ____ __ __ ____ \n"
MOTD01 += " / ___| | | | _ \/ ___|| ____/ ___| / ___| \/ | _ \ \n"
MOTD01 += " | | | | | | |_) \___ \| _| \___ \| | | |\/| | | | |\n"
MOTD01 += " | |___| |_| | _ < ___) | |___ ___) | |___| | | | |_| |\n"
MOTD01 += " \____|\___/|_| \_\____/|_____|____/ \____|_| |_|____/ \n"
MOTD01 += " Command Line Application Template Version " + str(VERSION) + "\n"
MOTD01 += "-----------------------------------------------------------\n"
MOTD02 = "Commands: \n"
MOTD02 += "<H>ELP, <S>TATUS, <T>IMER, <E>CHO, \n"
MOTD02 += "<R>ECONNECT, <D>ISCONNECT, <Q>UIT/EXIT\n"
def disp(stdscr, current_line, text, newLine = False):
curses.echo()
# if current_line moves stuff off screen, move the screen first
# by that many text lines before display
actual_current_line = current_line
max_y, max_x = stdscr.getmaxyx()
if current_line >= max_y - 1:
actual_current_line = min(max_y-1, current_line)
stdscr.move(1, 0)
stdscr.refresh()
if newLine:
stdscr.addstr(actual_current_line, 0, text + "\n")
else:
stdscr.addstr(actual_current_line, 0, text)
# display the text
stdscr.refresh()
def increment(stdscr, in_line):
max_y, max_x = stdscr.getmaxyx()
return min(max_y-1, in_line + 1)
def my_raw_input(stdscr, r, prompt_string):
    # Read up to 20 characters typed after the prompt and decode the bytes curses returns,
    # so the command comparisons below work on a string.
    user_input = stdscr.getstr(r, len(prompt_string), 20)
    return user_input.decode('utf-8')
if __name__ == "__main__":
# Configure the logger
# loggerConfigFileName: The name and path of your configuration file
loggerConfigFileName = "./logconfig.txt"
logging.config.fileConfig(loggerConfigFileName)
# Create the logger
# Admin_Client: The name of a logger defined in the config file
myLogger = logging.getLogger('APP' + str(VERSION))
#myLogger.debug(msg)
myLogger.info("Initialized APP" + str(VERSION))
#myLogger.warn(msg)
#myLogger.error(msg)
#myLogger.critical(msg)
stdscr = curses.initscr()
stdscr.scrollok(1)
stdscr.idlok(1)
stdscr.idcok(1)
stdscr.keypad(True)
stdscr.clear()
disp(stdscr, 0, MOTD01)
disp(stdscr,len(MOTD01.split("\n"))+1, MOTD02)
MOTD_line_count = len(MOTD01.split("\n")) + len(MOTD02.split("\n"))
prompt = "APP" + str(VERSION) + " <"+ "IN[off] OUT[off]" + "> "
keep_going = True
return_prompt = True
cycling_history = False
max_y, max_x = stdscr.getmaxyx()
current_line = MOTD_line_count + 1
while keep_going:
curses.echo()
myLogger.debug("current_line after prompt display: " + str(current_line))
myLogger.debug("screen height before prompt display: " + str(max_y))
if return_prompt:
disp(stdscr, current_line, prompt)
choice = my_raw_input(stdscr, current_line, prompt).lower().strip()
current_line = increment(stdscr, current_line)
if choice == "help" or choice == "h":
pass
elif choice == "timer" or choice == "t":
pass
elif choice == "reconnect" or choice == "r":
pass
elif choice == "disconnect" or choice == "d":
pass
elif choice == "status" or choice == "s":
disp(stdscr, current_line, "status called", newLine=True)
myLogger.debug("current_line after status called: " + str(current_line))
myLogger.debug("screen height after status called: " + str(max_y))
current_line = increment(stdscr, current_line)
return_prompt = True
elif choice == "":
return_prompt = True
pass
elif choice == "quit" or choice == "exit" or choice == "q":
keep_going = False
else:
disp(stdscr, current_line, "Invalid Input. Type <help> for commands.", newLine=True)
current_line = increment(stdscr, current_line)
return_prompt = True
myLogger.info("Shutting Down APP" + str(VERSION))
curses.endwin()
logging.shutdown()
|
py | b406173bb3a4cd6d2f96fd9477cb3ebfe6e4535c | from pathlib import Path
from unittest import TestCase
from clvm_tools.clvmc import compile_clvm
from stai.types.blockchain_format.program import Program, SerializedProgram
wallet_program_files = set(
[
"stai/wallet/puzzles/calculate_synthetic_public_key.clvm",
"stai/wallet/puzzles/cc.clvm",
"stai/wallet/puzzles/stailisp_deserialisation.clvm",
"stai/wallet/puzzles/rom_bootstrap_generator.clvm",
"stai/wallet/puzzles/generator_for_single_coin.clvm",
"stai/wallet/puzzles/genesis-by-coin-id-with-0.clvm",
"stai/wallet/puzzles/genesis-by-puzzle-hash-with-0.clvm",
"stai/wallet/puzzles/lock.inner.puzzle.clvm",
"stai/wallet/puzzles/p2_conditions.clvm",
"stai/wallet/puzzles/p2_delegated_conditions.clvm",
"stai/wallet/puzzles/p2_delegated_puzzle.clvm",
"stai/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.clvm",
"stai/wallet/puzzles/p2_m_of_n_delegate_direct.clvm",
"stai/wallet/puzzles/p2_puzzle_hash.clvm",
"stai/wallet/puzzles/rl_aggregation.clvm",
"stai/wallet/puzzles/rl.clvm",
"stai/wallet/puzzles/sha256tree_module.clvm",
"stai/wallet/puzzles/singleton_top_layer.clvm",
"stai/wallet/puzzles/did_innerpuz.clvm",
"stai/wallet/puzzles/decompress_puzzle.clvm",
"stai/wallet/puzzles/decompress_coin_spend_entry_with_prefix.clvm",
"stai/wallet/puzzles/decompress_coin_spend_entry.clvm",
"stai/wallet/puzzles/block_program_zero.clvm",
"stai/wallet/puzzles/test_generator_deserialize.clvm",
"stai/wallet/puzzles/test_multiple_generator_input_arguments.clvm",
"stai/wallet/puzzles/p2_singleton.clvm",
"stai/wallet/puzzles/pool_waitingroom_innerpuz.clvm",
"stai/wallet/puzzles/pool_member_innerpuz.clvm",
"stai/wallet/puzzles/singleton_launcher.clvm",
"stai/wallet/puzzles/p2_singleton_or_delayed_puzhash.clvm",
]
)
clvm_include_files = set(
["stai/wallet/puzzles/create-lock-puzzlehash.clvm", "stai/wallet/puzzles/condition_codes.clvm"]
)
CLVM_PROGRAM_ROOT = "stai/wallet/puzzles"
def list_files(dir, glob):
dir = Path(dir)
entries = dir.glob(glob)
files = [f for f in entries if f.is_file()]
return files
def read_file(path):
with open(path) as f:
return f.read()
def path_with_ext(path, ext):
return Path(str(path) + ext)
class TestClvmCompilation(TestCase):
"""
These are tests, and not just build scripts to regenerate the bytecode, because
the developer must be aware if the compiled output changes, for any reason.
"""
def test_all_programs_listed(self):
"""
Checks to see if a new .clvm file was added to stai/wallet/puzzles, but not added to `wallet_program_files`
"""
existing_files = list_files(CLVM_PROGRAM_ROOT, "*.clvm")
existing_file_paths = set([Path(x).relative_to(CLVM_PROGRAM_ROOT) for x in existing_files])
expected_files = set(clvm_include_files).union(set(wallet_program_files))
expected_file_paths = set([Path(x).relative_to(CLVM_PROGRAM_ROOT) for x in expected_files])
self.assertEqual(
expected_file_paths,
existing_file_paths,
msg="Please add your new program to `wallet_program_files` or `clvm_include_files.values`",
)
def test_include_and_source_files_separate(self):
self.assertEqual(clvm_include_files.intersection(wallet_program_files), set())
# TODO: Test recompilation with all available compiler configurations & implementations
def test_all_programs_are_compiled(self):
"""Checks to see if a new .clvm file was added without its .hex file"""
all_compiled = True
msg = "Please compile your program with:\n"
# Note that we cannot test all existing .clvm files - some are not
# meant to be run as a "module" with load_clvm; some are include files
# We test for inclusion in `test_all_programs_listed`
for prog_path in wallet_program_files:
try:
output_path = path_with_ext(prog_path, ".hex")
hex = output_path.read_text()
self.assertTrue(len(hex) > 0)
except Exception as ex:
all_compiled = False
                msg += f"    run -i {Path(prog_path).parent} -d {prog_path} > {prog_path}.hex\n"
print(ex)
msg += "and check it in"
self.assertTrue(all_compiled, msg=msg)
def test_recompilation_matches(self):
self.maxDiff = None
for f in wallet_program_files:
f = Path(f)
compile_clvm(f, path_with_ext(f, ".recompiled"), search_paths=[f.parent])
orig_hex = path_with_ext(f, ".hex").read_text().strip()
new_hex = path_with_ext(f, ".recompiled").read_text().strip()
self.assertEqual(orig_hex, new_hex, msg=f"Compilation of {f} does not match {f}.hex")
pass
def test_all_compiled_programs_are_hashed(self):
"""Checks to see if a .hex file is missing its .sha256tree file"""
all_hashed = True
msg = "Please hash your program with:\n"
for prog_path in wallet_program_files:
try:
hex = path_with_ext(prog_path, ".hex.sha256tree").read_text()
self.assertTrue(len(hex) > 0)
except Exception as ex:
print(ex)
all_hashed = False
msg += f" opd -H {prog_path}.hex | head -1 > {prog_path}.hex.sha256tree\n"
msg += "and check it in"
self.assertTrue(all_hashed, msg)
    # TODO: Test all available shatree implementations on all programs
def test_shatrees_match(self):
"""Checks to see that all .sha256tree files match their .hex files"""
for prog_path in wallet_program_files:
# load the .hex file as a program
hex_filename = path_with_ext(prog_path, ".hex")
clvm_hex = hex_filename.read_text() # .decode("utf8")
clvm_blob = bytes.fromhex(clvm_hex)
s = SerializedProgram.from_bytes(clvm_blob)
p = Program.from_bytes(clvm_blob)
# load the checked-in shatree
existing_sha = path_with_ext(prog_path, ".hex.sha256tree").read_text().strip()
self.assertEqual(
s.get_tree_hash().hex(),
existing_sha,
msg=f"Checked-in shatree hash file does not match shatree hash of loaded SerializedProgram: {prog_path}", # noqa
)
self.assertEqual(
p.get_tree_hash().hex(),
existing_sha,
msg=f"Checked-in shatree hash file does not match shatree hash of loaded Program: {prog_path}",
)
|
py | b406174e6a2c807df6743c85fb23d8505b26944d | from SignLanguage import create_app
app = create_app()
if __name__ == '__main__':
app.run(debug=True)
|
py | b406177c90b1e4673c03d27022d8fc35f15579a1 | from mavros_msgs.msg import Waypoint
MAV_GLOBAL_FRAME = 3
MAV_CMD_WAYPOINT = 16
MAV_CMD_RTL = 20
def waypoint(lat, lon, alt, delay):
w = Waypoint()
w.frame = MAV_GLOBAL_FRAME
w.command = MAV_CMD_WAYPOINT
w.is_current = False
w.autocontinue = True
    w.param1 = delay # Hold time in mission
    w.param2 = 2 # Position threshold in meters
w.x_lat = lat
w.y_long = lon
w.z_alt = alt
return w
def rtl():
w = Waypoint()
w.command = MAV_CMD_RTL
return w
def mission_planner(lat0, lon0, lat, lon):
takeoff = waypoint(lat0, lon0, 10, 5)
takeoff.is_current = True
return [ takeoff
, waypoint(lat0, lon0, 50, 10)
, waypoint(lat, lon, 50, 10)
, rtl() ] |
py | b40617b0921198b824d99d0b3142eab9af2b5d39 | from typing import List, Union, Dict, Any, Tuple
from abc import ABC, abstractmethod
import pandas as pd
import pdfplumber
import layoutparser as lp
import pdf2image
from .datamodel import *
def convert_token_dict_to_layout(tokens):
return lp.Layout(
[
lp.TextBlock(
lp.Rectangle(
x_1=token["x"],
y_1=token["y"],
x_2=token["x"] + token["width"],
y_2=token["y"] + token["height"],
),
text=token["text"],
type=token.get("type"),
)
for token in tokens
]
)
def load_page_data_from_dict(source_data: Dict[str, Any]) -> List[Dict]:
return [
PDFPage(
height=page_data["page"]["height"],
width=page_data["page"]["width"],
tokens=convert_token_dict_to_layout(page_data["tokens"]),
url_tokens=convert_token_dict_to_layout(page_data["url_tokens"]),
lines=lp.Layout(
[
lp.Rectangle(
x_1=line["x"],
y_1=line["y"],
x_2=line["x"] + line["width"],
y_2=line["y"] + line["height"],
)
for line in page_data["lines"]
]
),
)
for page_data in source_data
]
class BasePDFTokenExtractor(ABC):
"""PDF token extractors will load all the *tokens* and save using pdfstructure service."""
def __call__(self, pdf_path: str):
return self.extract(pdf_path)
@abstractmethod
def extract(self, pdf_path: str):
"""Extract PDF Tokens from the input pdf_path
Args:
pdf_path (str):
The path to a PDF file
        Returns:
            A list of per-page token data extracted from the PDF.
        """
pass
class PDFPlumberTokenExtractor(BasePDFTokenExtractor):
NAME = "pdfplumber"
    # Thresholds defining what a regular underline should look like
    UNDERLINE_HEIGHT_THRESHOLD = 3
    UNDERLINE_WIDTH_THRESHOLD = 100
@staticmethod
def convert_to_pagetoken(row: pd.Series) -> Dict:
"""Convert a row in a DataFrame to pagetoken"""
return dict(
text=row["text"],
x=row["x0"],
width=row["width"],
y=row["top"],
height=row["height"],
type=row.get("fontname"),
)
def obtain_word_tokens(self, cur_page: pdfplumber.page.Page) -> List[Dict]:
"""Obtain all words from the current page.
Args:
cur_page (pdfplumber.page.Page):
the pdfplumber.page.Page object with PDF token information
Returns:
List[PageToken]:
A list of page tokens stored in PageToken format.
"""
words = cur_page.extract_words(
x_tolerance=1.5,
y_tolerance=3,
keep_blank_chars=False,
use_text_flow=True,
horizontal_ltr=True,
vertical_ttb=True,
extra_attrs=["fontname", "size"],
)
if len(words) == 0:
return []
df = pd.DataFrame(words)
# Avoid boxes outside the page
df[["x0", "x1"]] = (
df[["x0", "x1"]].clip(lower=0, upper=int(cur_page.width)).astype("float")
)
df[["top", "bottom"]] = (
df[["top", "bottom"]]
.clip(lower=0, upper=int(cur_page.height))
.astype("float")
)
df["height"] = df["bottom"] - df["top"]
df["width"] = df["x1"] - df["x0"]
word_tokens = df.apply(self.convert_to_pagetoken, axis=1).tolist()
return word_tokens
def obtain_page_hyperlinks(self, cur_page: pdfplumber.page.Page) -> List[Dict]:
if len(cur_page.hyperlinks) == 0:
return []
df = pd.DataFrame(cur_page.hyperlinks)
df[["x0", "x1"]] = (
df[["x0", "x1"]].clip(lower=0, upper=int(cur_page.width)).astype("float")
)
df[["top", "bottom"]] = (
df[["top", "bottom"]]
.clip(lower=0, upper=int(cur_page.height))
.astype("float")
)
df[["height", "width"]] = df[["height", "width"]].astype("float")
hyperlink_tokens = (
df.rename(columns={"uri": "text"})
.apply(self.convert_to_pagetoken, axis=1)
.tolist()
)
return hyperlink_tokens
def obtain_page_lines(self, cur_page: pdfplumber.page.Page) -> List[Dict]:
height = float(cur_page.height)
page_objs = cur_page.rects + cur_page.lines
possible_underlines = [
dict(
x=float(ele["x0"]),
y=height - float(ele["y0"]),
height=float(ele["height"]),
width=float(ele["width"]),
)
for ele in filter(
lambda obj: obj["height"] < self.UNDERLINE_HEIGHT_THRESHOLD
and obj["width"] < self.UNDERLINE_WIDTH_THRESHOLD,
page_objs,
)
]
return possible_underlines
def extract(self, pdf_path: str) -> List[Dict]:
"""Extracts token text, positions, and style information from a PDF file.
Args:
            pdf_path (str): the path to the pdf file.
        Returns:
            List[PDFPage]: one entry per page, holding word tokens, hyperlink tokens,
            and possible underline rectangles as layoutparser objects.
"""
plumber_pdf_object = pdfplumber.open(pdf_path)
pages = []
for page_id in range(len(plumber_pdf_object.pages)):
cur_page = plumber_pdf_object.pages[page_id]
tokens = self.obtain_word_tokens(cur_page)
url_tokens = self.obtain_page_hyperlinks(cur_page)
lines = self.obtain_page_lines(cur_page)
page = dict(
page=dict(
width=float(cur_page.width),
height=float(cur_page.height),
index=page_id,
),
tokens=tokens,
url_tokens=url_tokens,
lines=lines,
)
pages.append(page)
return load_page_data_from_dict(pages)
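# A hedged usage sketch for the extractor above (the file path is a placeholder):
# extractor = PDFPlumberTokenExtractor()
# pages = extractor("paper.pdf")        # one PDFPage per page, via __call__ -> extract
# word_layout = pages[0].tokens         # layoutparser Layout of word tokens on page 1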
class PDFExtractor:
"""PDF Extractor will load both images and layouts for PDF documents for downstream processing."""
def __init__(self, pdf_extractor_name, **kwargs):
self.pdf_extractor_name = pdf_extractor_name.lower()
if self.pdf_extractor_name == PDFPlumberTokenExtractor.NAME:
self.pdf_extractor = PDFPlumberTokenExtractor(**kwargs)
else:
raise NotImplementedError(
f"Unknown pdf_extractor_name {pdf_extractor_name}"
)
def load_tokens_and_image(
self, pdf_path: str, resize_image=False, resize_layout=False, **kwargs
):
pdf_tokens = self.pdf_extractor(pdf_path, **kwargs)
page_images = pdf2image.convert_from_path(pdf_path, dpi=72)
assert not (
resize_image and resize_layout
), "You could not resize image and layout simultaneously."
if resize_layout:
for image, page in zip(page_images, pdf_tokens):
width, height = image.size
resize_factor = width / page.width, height / page.height
page.tokens = page.tokens.scale(resize_factor)
page.image_height = height
page.image_width = width
elif resize_image:
page_images = [
image.resize((int(page.width), int(page.height)))
if page.width != image.size[0]
else image
for image, page in zip(page_images, pdf_tokens)
]
return pdf_tokens, page_images |
py | b40617b2f8b689267935c2beb1faa333fde534eb | # -*- coding: utf-8 -*-
import re
#Credits : stackoverflow
alphabets= "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
def split_into_lists_of_words(text, split_words = True):
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
if "”" in text: text = text.replace(".”","”.")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
text = text.replace(' ', " ")
sentences = text.split("<stop>")
sentences = sentences[:-1]
if split_words :
sentences = [s.strip().split(' ') for s in sentences if len(s) > 3]
#sentences = [s.strip() for s in sentences if len(s) > 1]
return sentences
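# Illustrative usage (added; not part of the original snippet). The sample
# sentence is made up and only shows the nested list-of-words output shape.
if __name__ == "__main__":
    demo_text = "Mr. Smith works at example.com. He likes Python! Do you?"
    print(split_into_lists_of_words(demo_text))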
|
py | b4061812c4bd945ff6bd534e0e5a524d4e58b579 | from tool.runners.python import SubmissionPy
class EvqnaSubmission(SubmissionPy):
def is_valid(self, password_entry):
policy, c, pwd = password_entry.split()
policy_min, policy_max = policy.split('-')
return int(policy_min) <= pwd.count(c[0]) <= int(policy_max)
def run(self, input):
return sum(1 for entry in input.splitlines() if self.is_valid(entry))
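# Illustrative check (added): exercise the policy parser on two well-known
# example entries for this password-policy puzzle. is_valid() does not touch
# self, so it is called unbound here to avoid assuming anything about the
# SubmissionPy constructor.
if __name__ == "__main__":
    print(EvqnaSubmission.is_valid(None, "1-3 a: abcde"))  # expected: True
    print(EvqnaSubmission.is_valid(None, "1-3 b: cdefg"))  # expected: False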
|
py | b406182542010e1f1b41ea606bb4b3671cad71bd | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for common roidb manipulations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import logging
import numpy as np
import utils.boxes as box_utils
import utils.keypoints as keypoint_utils
import utils.segms as segm_utils
import utils.blob as blob_utils
from core.config import cfg
from .json_dataset import JsonDataset
from .clevr_dataset import ClevrDataset
logger = logging.getLogger(__name__)
def combined_roidb_for_training(dataset_names, proposal_files):
"""Load and concatenate roidbs for one or more datasets, along with optional
object proposals. The roidb entries are then prepared for use in training,
which involves caching certain types of metadata for each roidb entry.
"""
def get_roidb(dataset_name, proposal_file):
if 'clevr' in dataset_name:
ds = ClevrDataset(dataset_name)
else:
ds = JsonDataset(dataset_name)
roidb = ds.get_roidb(
gt=True,
proposal_file=proposal_file,
crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
)
if cfg.TRAIN.USE_FLIPPED:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, ds)
logger.info('Loaded dataset: {:s}'.format(ds.name))
return roidb
if isinstance(dataset_names, six.string_types):
dataset_names = (dataset_names, )
if isinstance(proposal_files, six.string_types):
proposal_files = (proposal_files, )
if len(proposal_files) == 0:
proposal_files = (None, ) * len(dataset_names)
assert len(dataset_names) == len(proposal_files)
roidbs = [get_roidb(*args) for args in zip(dataset_names, proposal_files)]
roidb = roidbs[0]
for r in roidbs[1:]:
roidb.extend(r)
roidb = filter_for_training(roidb)
if cfg.TRAIN.ASPECT_GROUPING or cfg.TRAIN.ASPECT_CROPPING:
logger.info('Computing image aspect ratios and ordering the ratios...')
ratio_list, ratio_index = rank_for_training(roidb)
logger.info('done')
else:
ratio_list, ratio_index = None, None
logger.info('Computing bounding-box regression targets...')
add_bbox_regression_targets(roidb)
logger.info('done')
_compute_and_log_stats(roidb)
return roidb, ratio_list, ratio_index
def extend_with_flipped_entries(roidb, dataset):
"""Flip each entry in the given roidb and return a new roidb that is the
concatenation of the original roidb and the flipped entries.
"Flipping" an entry means that that image and associated metadata (e.g.,
ground truth boxes and object proposals) are horizontally flipped.
"""
flipped_roidb = []
for entry in roidb:
width = entry['width']
boxes = entry['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = width - oldx2 - 1
boxes[:, 2] = width - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
flipped_entry = {}
dont_copy = ('boxes', 'segms', 'gt_keypoints', 'flipped')
for k, v in entry.items():
if k not in dont_copy:
flipped_entry[k] = v
flipped_entry['boxes'] = boxes
flipped_entry['segms'] = segm_utils.flip_segms(
entry['segms'], entry['height'], entry['width']
)
if dataset.keypoints is not None:
flipped_entry['gt_keypoints'] = keypoint_utils.flip_keypoints(
dataset.keypoints, dataset.keypoint_flip_map,
entry['gt_keypoints'], entry['width']
)
flipped_entry['flipped'] = True
flipped_roidb.append(flipped_entry)
roidb.extend(flipped_roidb)
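# Added worked example (illustrative only, not used by the pipeline): with the
# width - x - 1 convention above, a box spanning x0=10..x1=29 on a 100-pixel
# wide image flips to x0=70..x1=89.
def _demo_flip_box_x(width=100, x0=10, x1=29):
    return width - x1 - 1, width - x0 - 1  # -> (70, 89) for the defaults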
def filter_for_training(roidb):
"""Remove roidb entries that have no usable RoIs based on config settings.
"""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
if cfg.MODEL.KEYPOINTS_ON:
# If we're training for keypoints, exclude images with no keypoints
valid = valid and entry['has_visible_keypoints']
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
logger.info('Filtered {} roidb entries: {} -> {}'.
format(num - num_after, num, num_after))
return filtered_roidb
def rank_for_training(roidb):
"""Rank the roidb entries according to image aspect ration and mark for cropping
for efficient batching if image is too long.
Returns:
ratio_list: ndarray, list of aspect ratios from small to large
ratio_index: ndarray, list of roidb entry indices correspond to the ratios
"""
RATIO_HI = cfg.TRAIN.ASPECT_HI # largest ratio to preserve.
RATIO_LO = cfg.TRAIN.ASPECT_LO # smallest ratio to preserve.
need_crop_cnt = 0
ratio_list = []
for entry in roidb:
width = entry['width']
height = entry['height']
ratio = width / float(height)
if cfg.TRAIN.ASPECT_CROPPING:
if ratio > RATIO_HI:
entry['need_crop'] = True
ratio = RATIO_HI
need_crop_cnt += 1
elif ratio < RATIO_LO:
entry['need_crop'] = True
ratio = RATIO_LO
need_crop_cnt += 1
else:
entry['need_crop'] = False
else:
entry['need_crop'] = False
ratio_list.append(ratio)
if cfg.TRAIN.ASPECT_CROPPING:
logging.info('Number of entries that need to be cropped: %d. Ratio bound: [%.2f, %.2f]',
need_crop_cnt, RATIO_LO, RATIO_HI)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
return ratio_list[ratio_index], ratio_index
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
for entry in roidb:
entry['bbox_targets'] = _compute_targets(entry)
def _compute_targets(entry):
"""Compute bounding-box regression targets for an image."""
# Indices of ground-truth ROIs
rois = entry['boxes']
overlaps = entry['max_overlaps']
labels = entry['max_classes']
gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
# Targets has format (class, tx, ty, tw, th)
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
if len(gt_inds) == 0:
# Bail if the image has no ground-truth ROIs
return targets
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = box_utils.bbox_overlaps(
rois[ex_inds, :].astype(dtype=np.float32, copy=False),
rois[gt_inds, :].astype(dtype=np.float32, copy=False))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
# Use class "1" for all boxes if using class_agnostic_bbox_reg
targets[ex_inds, 0] = (
1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
return targets
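# Hedged reference sketch (added): the (dx, dy, dw, dh) encoding that a
# Detectron-style box_utils.bbox_transform_inv is expected to produce for one
# example box and its matched ground-truth box. This standalone helper is for
# illustration only; _compute_targets() above always uses the project's own
# box_utils implementation and cfg.MODEL.BBOX_REG_WEIGHTS.
def _demo_bbox_encoding(ex_box, gt_box, weights=(1.0, 1.0, 1.0, 1.0)):
    ex_w = ex_box[2] - ex_box[0] + 1.0
    ex_h = ex_box[3] - ex_box[1] + 1.0
    ex_cx = ex_box[0] + 0.5 * ex_w
    ex_cy = ex_box[1] + 0.5 * ex_h
    gt_w = gt_box[2] - gt_box[0] + 1.0
    gt_h = gt_box[3] - gt_box[1] + 1.0
    gt_cx = gt_box[0] + 0.5 * gt_w
    gt_cy = gt_box[1] + 0.5 * gt_h
    wx, wy, ww, wh = weights
    return (wx * (gt_cx - ex_cx) / ex_w,
            wy * (gt_cy - ex_cy) / ex_h,
            ww * np.log(gt_w / ex_w),
            wh * np.log(gt_h / ex_h))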
def _compute_and_log_stats(roidb):
classes = roidb[0]['dataset'].classes
char_len = np.max([len(c) for c in classes])
hist_bins = np.arange(len(classes) + 1)
# Histogram of ground-truth objects
    gt_hist = np.zeros((len(classes)), dtype=np.int64)  # np.int is removed in newer NumPy
for entry in roidb:
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
gt_classes = entry['gt_classes'][gt_inds]
gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
logger.debug('Ground-truth class histogram:')
for i, v in enumerate(gt_hist):
logger.debug(
'{:d}{:s}: {:d}'.format(
i, classes[i].rjust(char_len), v))
logger.debug('-' * char_len)
logger.debug(
'{:s}: {:d}'.format(
'total'.rjust(char_len), np.sum(gt_hist)))
|
py | b4061a9d8fb2478f34d2845a21f9931d235ca2d5 | '''!
* Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
'''
import json
from typing import IO
from contextlib import contextmanager
import warnings
class TrainingLogRecord(object):
def __init__(self,
record_id: int,
iter_per_learner: int,
logged_metric: float,
trial_time: float,
wall_clock_time: float,
validation_loss,
config,
best_validation_loss,
best_config,
learner,
sample_size):
self.record_id = record_id
self.iter_per_learner = iter_per_learner
self.logged_metric = logged_metric
self.trial_time = trial_time
self.wall_clock_time = wall_clock_time
self.validation_loss = validation_loss
self.config = config
self.best_validation_loss = best_validation_loss
self.best_config = best_config
self.learner = learner
self.sample_size = sample_size
def dump(self, fp: IO[str]):
d = vars(self)
return json.dump(d, fp)
@classmethod
def load(cls, json_str: str):
d = json.loads(json_str)
return cls(**d)
def __str__(self):
return json.dumps(vars(self))
class TrainingLogCheckPoint(TrainingLogRecord):
def __init__(self, curr_best_record_id: int):
self.curr_best_record_id = curr_best_record_id
class TrainingLogWriter(object):
def __init__(self, output_filename: str):
self.output_filename = output_filename
self.file = None
self.current_best_loss_record_id = None
self.current_best_loss = float('+inf')
self.current_sample_size = None
self.current_record_id = 0
def open(self):
self.file = open(self.output_filename, 'w')
def append_open(self):
self.file = open(self.output_filename, 'a')
def append(self,
it_counter: int,
train_loss: float,
trial_time: float,
wall_clock_time: float,
validation_loss,
config,
best_validation_loss,
best_config,
learner,
sample_size):
if self.file is None:
raise IOError("Call open() to open the outpute file first.")
if validation_loss is None:
raise ValueError('TEST LOSS NONE ERROR!!!')
record = TrainingLogRecord(self.current_record_id,
it_counter,
train_loss,
trial_time,
wall_clock_time,
validation_loss,
config,
best_validation_loss,
best_config,
learner,
sample_size)
if validation_loss < self.current_best_loss or \
validation_loss == self.current_best_loss and \
self.current_sample_size is not None and \
sample_size > self.current_sample_size:
self.current_best_loss = validation_loss
self.current_sample_size = sample_size
self.current_best_loss_record_id = self.current_record_id
self.current_record_id += 1
record.dump(self.file)
self.file.write('\n')
self.file.flush()
def checkpoint(self):
if self.file is None:
raise IOError("Call open() to open the outpute file first.")
if self.current_best_loss_record_id is None:
warnings.warn("checkpoint() called before any record is written, "
"skipped.")
return
record = TrainingLogCheckPoint(self.current_best_loss_record_id)
record.dump(self.file)
self.file.write('\n')
self.file.flush()
def close(self):
if self.file is not None:
self.file.close()
self.file = None # for pickle
class TrainingLogReader(object):
def __init__(self, filename: str):
self.filename = filename
self.file = None
def open(self):
self.file = open(self.filename)
def records(self):
if self.file is None:
raise IOError("Call open() before reading log file.")
for line in self.file:
data = json.loads(line)
if len(data) == 1:
# Skip checkpoints.
continue
yield TrainingLogRecord(**data)
def close(self):
if self.file is not None:
self.file.close()
self.file = None # for pickle
def get_record(self, record_id) -> TrainingLogRecord:
if self.file is None:
raise IOError("Call open() before reading log file.")
for rec in self.records():
if rec.record_id == record_id:
return rec
raise ValueError(f"Cannot find record with id {record_id}.")
@contextmanager
def training_log_writer(filename: str, append: bool = False):
try:
w = TrainingLogWriter(filename)
if not append:
w.open()
else:
w.append_open()
yield w
finally:
w.close()
@contextmanager
def training_log_reader(filename: str):
try:
r = TrainingLogReader(filename)
r.open()
yield r
finally:
r.close()
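# Illustrative usage (added; not part of the original module): a minimal round
# trip through the writer/reader context managers. The file name and logged
# values are placeholders.
if __name__ == "__main__":
    with training_log_writer("demo_training_log.jsonl") as demo_writer:
        demo_writer.append(it_counter=0, train_loss=0.5, trial_time=0.1,
                           wall_clock_time=0.1, validation_loss=0.4,
                           config={"n_estimators": 4},
                           best_validation_loss=0.4,
                           best_config={"n_estimators": 4},
                           learner="lgbm", sample_size=100)
        demo_writer.checkpoint()
    with training_log_reader("demo_training_log.jsonl") as demo_reader:
        for demo_record in demo_reader.records():
            print(demo_record)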
|
py | b4061ab57e2f2604f0574865e88656421caa0133 |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class RSMPIAType(TREElement):
def __init__(self, value):
super(RSMPIAType, self).__init__()
self.add_field('IID', 's', 80, value)
self.add_field('EDITION', 's', 40, value)
self.add_field('R0', 's', 21, value)
self.add_field('RX', 's', 21, value)
self.add_field('RY', 's', 21, value)
self.add_field('RZ', 's', 21, value)
self.add_field('RXX', 's', 21, value)
self.add_field('RXY', 's', 21, value)
self.add_field('RXZ', 's', 21, value)
self.add_field('RYY', 's', 21, value)
self.add_field('RYZ', 's', 21, value)
self.add_field('RZZ', 's', 21, value)
self.add_field('C0', 's', 21, value)
self.add_field('CX', 's', 21, value)
self.add_field('CY', 's', 21, value)
self.add_field('CZ', 's', 21, value)
self.add_field('CXX', 's', 21, value)
self.add_field('CXY', 's', 21, value)
self.add_field('CXZ', 's', 21, value)
self.add_field('CYY', 's', 21, value)
self.add_field('CYZ', 's', 21, value)
self.add_field('CZZ', 's', 21, value)
self.add_field('RNIS', 's', 3, value)
self.add_field('CNIS', 's', 3, value)
self.add_field('TNIS', 's', 3, value)
self.add_field('RSSIZ', 's', 21, value)
self.add_field('CSSIZ', 's', 21, value)
class RSMPIA(TREExtension):
_tag_value = 'RSMPIA'
_data_type = RSMPIAType
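# Added note (illustrative, hedged): summing the fixed field widths declared in
# RSMPIAType above gives the byte length of the declared RSMPIA payload. This
# helper exists only to document that arithmetic and is not used by the TRE
# machinery.
def _rsmpia_declared_length() -> int:
    widths = [80, 40] + [21] * 20 + [3, 3, 3] + [21, 21]
    return sum(widths)  # 591 bytes for the fields listed above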
|
py | b4061adc9259055fce6a81e0478e169d807fbab7 | from app.models import Configs, Devices
from app import db
# The function is needed to check if the device is in database
def get_exist_device_on_db(ipaddress: str) -> bool:
"""
    The function is needed to check if the device exists in the database
"""
try:
# Get last configurations from DB
data = (
Devices.query.order_by(Devices.timestamp.desc())
.filter_by(device_ip=ipaddress)
.first()
)
if data:
return True
else:
return False
except:
return False
# The function gets env for all devices from database
def get_devices_env() -> dict:
"""
    The function gets env for all devices from the database
    Returns:
        Devices env dict
"""
# Create dict for device environment data
devices_env_dict = {}
# Gets devices ip from database
data = Devices.query.order_by(Devices.device_ip.desc())
# Create list for device ip addresses
ip_list = [ip.device_ip for ip in data]
# Create a tuple for unique ip addresses
ip_list = sorted(tuple(set(ip_list)))
    # This counter is needed to create the html element id for the accordion
for html_element_id, ip in enumerate(ip_list, start=1):
db_data = get_last_env_for_device_from_db(ip)
# Checking if the previous configuration exists to enable/disable
# the "Compare configuration" button on the device page
check_previous_config = check_if_previous_configuration_exists(ipaddress=ip)
# Getting last config timestamp for device page
last_config_timestamp = get_last_config_for_device(ipaddress=ip)["timestamp"]
# If the latest configuration does not exist, return "No backup yet"
if last_config_timestamp is None:
last_config_timestamp = "No backup yet"
# Update device dict
devices_env_dict.update(
{
ip: {
"html_element_id": f"{html_element_id}",
"hostname": db_data["hostname"],
"vendor": db_data["vendor"],
"model": db_data["model"],
"os_version": db_data["os_version"],
"sn": db_data["sn"],
"uptime": db_data["uptime"],
"connection_status": db_data["connection_status"],
"timestamp": db_data["timestamp"],
"check_previous_config": check_previous_config,
"last_config_timestamp": last_config_timestamp,
}
}
)
return devices_env_dict
# The function gets the latest env from the database for the provided device
def get_last_env_for_device_from_db(ipaddress: str) -> dict or None:
"""
    Params:
        ipaddress: str
    Returns:
        device env dict or None
"""
try:
# Get last configurations from DB
data = (
Devices.query.order_by(Devices.timestamp.desc())
.filter_by(device_ip=ipaddress)
.first()
)
# Variable for device env
db_last_ipaddress = data.device_ip
db_last_hostname = data.device_hostname
db_device_vendor = data.device_vendor
db_device_model = data.device_model
db_device_os_version = data.device_os_version
db_device_sn = data.device_sn
db_device_uptime = data.device_uptime
db_connection_status = data.connection_status
# Variable to set the timestamp
db_last_timestamp = data.timestamp
# Return device env dict
return {
"ipaddress": db_last_ipaddress,
"hostname": db_last_hostname,
"vendor": db_device_vendor,
"model": db_device_model,
"os_version": db_device_os_version,
"sn": db_device_sn,
"connection_status": db_connection_status,
"uptime": db_device_uptime,
"timestamp": db_last_timestamp,
}
except Exception as db_error:
print(db_error)
# If env not found return None
return None
# This function updates a device environment record in the DB
def update_device_env_on_db(
ipaddress: str,
hostname: str,
vendor: str,
model: str,
os_version: str,
sn: str,
uptime: str,
connection_status: str,
connection_driver: str,
timestamp: str,
) -> None:
"""
    This function updates a device environment record in the DB
    Params:
        ipaddress: str
        hostname: str
        vendor: str
        model: str
        os_version: str
        sn: str
        uptime: str
        timestamp: str
    Returns:
None
"""
try:
# Getting device data from db
data = db.session.query(Devices).filter_by(device_ip=ipaddress).first()
# If device hostname changed overwrite data on db
if data.device_hostname != hostname:
data.device_hostname = hostname
# If device vendor name changed overwrite data on db
if data.device_vendor != vendor:
data.device_vendor = vendor
# If device model changed overwrite data on db
if data.device_model != model:
data.device_model = model
# If device os version changed overwrite data on db
if data.device_os_version != os_version:
data.device_os_version = os_version
# If device serial number changed overwrite data on db
if data.device_sn != sn:
data.device_sn = sn
if data.connection_status != connection_status:
data.connection_status = connection_status
if data.connection_driver != connection_driver:
data.connection_driver = connection_driver
# Overwrite device uptime on db
data.device_uptime = uptime
# Overwrite timestamp on db
data.timestamp = timestamp
# Apply changing
db.session.commit()
db.session.close()
except Exception as update_sql_error:
# If an error occurs as a result of writing to the DB,
# then rollback the DB and write a message to the log
print(update_sql_error)
db.session.rollback()
# This function updates the device connection status in the DB
def update_device_status_on_db(
ipaddress: str,
connection_status: str,
timestamp: str,
) -> None:
"""
    This function updates the device connection status in the DB
    Params:
        ipaddress: str
        connection_status: str
        timestamp: str
    Returns:
None
"""
try:
# Getting device data from db
data = db.session.query(Devices).filter_by(device_ip=ipaddress).first()
if data.connection_status != connection_status:
data.connection_status = connection_status
# Overwrite timestamp on db
data.timestamp = timestamp
# Apply changing
db.session.commit()
db.session.close()
except Exception as update_sql_error:
# If an error occurs as a result of writing to the DB,
# then rollback the DB and write a message to the log
print(update_sql_error)
db.session.rollback()
# This function writes a new device environment record to the DB if the device does not exist
def write_device_env_on_db(
ipaddress: str,
hostname: str,
vendor: str,
model: str,
os_version: str,
sn: str,
uptime: str,
connection_status: str,
connection_driver: str,
) -> None:
"""
This function writes a new device environment file to the DB if device is not exist
Need to parm:
ipaddress: str
hostname: str
vendor: str
model: str
os_version: str
sn: str
uptime: str
Ipaddress and config, timestamp generated automatically
return:
None
"""
# We form a request to the database and pass the IP address and device environment
device_env = Devices(
device_ip=ipaddress,
device_hostname=hostname,
device_vendor=vendor,
device_model=model,
device_os_version=os_version,
device_sn=sn,
device_uptime=uptime,
connection_status=connection_status,
connection_driver=connection_driver,
)
try:
# Sending data in BD
db.session.add(device_env)
# Committing changes
db.session.commit()
db.session.close()
except Exception as write_sql_error:
# If an error occurs as a result of writing to the DB,
# then rollback the DB and write a message to the log
print(write_sql_error)
db.session.rollback()
# The function gets the latest configuration file from the database for the provided device
def get_last_config_for_device(ipaddress: str) -> dict or None:
"""
    Params:
        ipaddress: str
    Returns:
        dict or None
"""
try:
# Get last configurations from DB
data = (
Configs.query.order_by(Configs.timestamp.desc())
.filter_by(device_ip=ipaddress)
.first()
)
# Variable for device configuration
db_last_config = data.device_config
# Variable to set the timestamp
db_last_timestamp = data.timestamp
return {"last_config": db_last_config, "timestamp": db_last_timestamp}
except:
# If configuration not found return None
return None
# This function gets all timestamps for which there is a configuration for this device
def get_all_cfg_timestamp_for_device(ipaddress: str) -> list or None:
"""
    Params:
        ipaddress: str
    Returns:
        list or None
"""
try:
# Gets all timestamp from DB
data = Configs.query.order_by(Configs.timestamp.desc()).filter_by(
device_ip=ipaddress
)
# Return list minus last config timestamp
return [db_timestamp.timestamp for db_timestamp in data[1:]]
except:
# If timestamp not found return None
return None
# This function gets the previous config for this device from the DB
def get_previous_config(ipaddress: str, db_timestamp: str) -> str or None:
"""
    Params:
        ipaddress: str
        db_timestamp: str
    Returns:
        str or None
"""
try:
# Get configurations from DB
data = Configs.query.order_by(Configs.timestamp.desc()).filter_by(
device_ip=ipaddress, timestamp=db_timestamp
)
# The database returns a list, we get text data from it and return it from the function
return data[0].device_config
except:
# If config not found return None
return None
# This function checks whether previous configuration versions
# exist for the device in the database
def check_if_previous_configuration_exists(ipaddress: str) -> bool:
"""
    This function checks whether previous configuration versions
    exist for the device in the database
    Params:
        ipaddress: str
    Returns:
        bool
"""
# Get configurations from DB
data = Configs.query.order_by(Configs.timestamp.desc()).filter_by(
device_ip=ipaddress
)
    # Count how many configurations exist for this device
    configs_list = [ip.device_ip for ip in data]
    return len(configs_list) > 1
# This function writes a new configuration file to the DB
def write_cfg_on_db(ipaddress: str, config: str) -> None:
"""
    Params:
        ipaddress and config; the timestamp is generated automatically
    Returns:
        None
"""
# We form a request to the database and pass the IP address and device configuration
config = Configs(device_ip=ipaddress, device_config=config)
try:
# Sending data in BD
db.session.add(config)
# Committing changes
db.session.commit()
db.session.close()
except Exception as write_sql_error:
# If an error occurs as a result of writing to the DB,
# then rollback the DB and write a message to the log
print(write_sql_error)
db.session.rollback()
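# Illustrative sketch (added): one way a caller might persist a freshly
# collected configuration and read it back. It assumes an active Flask
# application/database context; the address 192.0.2.1 and the config text are
# placeholders.
def _demo_backup_roundtrip(ipaddress: str = "192.0.2.1") -> None:
    write_cfg_on_db(ipaddress=ipaddress, config="hostname demo-router")
    latest = get_last_config_for_device(ipaddress=ipaddress)
    if latest is not None:
        print(latest["timestamp"], latest["last_config"][:40])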
if __name__ == "__main__":
print(get_last_env_for_device_from_db(ipaddress="10.255.101.190"))
|
py | b4061b79357b87ad1b8fdfa80eb87822e241e2f7 | import compileall
import contextlib
import filecmp
import importlib.util
import io
import itertools
import os
import pathlib
import py_compile
import shutil
import struct
import sys
import tempfile
import test.test_importlib.util
import time
import unittest
from unittest import mock, skipUnless
try:
from concurrent.futures import ProcessPoolExecutor
_have_multiprocessing = True
except ImportError:
_have_multiprocessing = False
from test import support
from test.support import script_helper
from .test_py_compile import without_source_date_epoch
from .test_py_compile import SourceDateEpochTestMeta
def get_pyc(script, opt):
if not opt:
# Replace None and 0 with ''
opt = ''
return importlib.util.cache_from_source(script, optimization=opt)
def get_pycs(script):
return [get_pyc(script, opt) for opt in (0, 1, 2)]
def is_hardlink(filename1, filename2):
"""Returns True if two files have the same inode (hardlink)"""
inode1 = os.stat(filename1).st_ino
inode2 = os.stat(filename2).st_ino
return inode1 == inode2
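# Added illustrative helper (not a test): is_hardlink() only compares inode
# numbers, so two directory entries created with os.link() for the same file
# compare equal on filesystems that support hard links.
def _demo_is_hardlink():
    with tempfile.TemporaryDirectory() as tmp:
        first = os.path.join(tmp, "a.pyc")
        second = os.path.join(tmp, "b.pyc")
        with open(first, "wb") as fp:
            fp.write(b"demo")
        os.link(first, second)
        return is_hardlink(first, second)  # expected: True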
class CompileallTestsBase:
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
self.bc_path = importlib.util.cache_from_source(self.source_path)
with open(self.source_path, 'w') as file:
file.write('x = 123\n')
self.source_path2 = os.path.join(self.directory, '_test2.py')
self.bc_path2 = importlib.util.cache_from_source(self.source_path2)
shutil.copyfile(self.source_path, self.source_path2)
self.subdirectory = os.path.join(self.directory, '_subdir')
os.mkdir(self.subdirectory)
self.source_path3 = os.path.join(self.subdirectory, '_test3.py')
shutil.copyfile(self.source_path, self.source_path3)
def tearDown(self):
shutil.rmtree(self.directory)
def add_bad_source_file(self):
self.bad_source_path = os.path.join(self.directory, '_test_bad.py')
with open(self.bad_source_path, 'w') as file:
file.write('x (\n')
def timestamp_metadata(self):
with open(self.bc_path, 'rb') as file:
data = file.read(12)
mtime = int(os.stat(self.source_path).st_mtime)
compare = struct.pack('<4sll', importlib.util.MAGIC_NUMBER, 0, mtime)
return data, compare
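    # Added helper for illustration only (unused by the tests): unpack the same
    # 12-byte prefix that timestamp_metadata() compares - 4-byte magic number,
    # 4-byte flags word (0 for timestamp-based pycs) and 4-byte source mtime.
    @staticmethod
    def _decode_pyc_prefix(path):
        with open(path, 'rb') as file:
            magic, flags, mtime = struct.unpack('<4sll', file.read(12))
        return magic, flags, mtime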
def recreation_check(self, metadata):
"""Check that compileall recreates bytecode when the new metadata is
used."""
if os.environ.get('SOURCE_DATE_EPOCH'):
raise unittest.SkipTest('SOURCE_DATE_EPOCH is set')
py_compile.compile(self.source_path)
self.assertEqual(*self.timestamp_metadata())
with open(self.bc_path, 'rb') as file:
bc = file.read()[len(metadata):]
with open(self.bc_path, 'wb') as file:
file.write(metadata)
file.write(bc)
self.assertNotEqual(*self.timestamp_metadata())
compileall.compile_dir(self.directory, force=False, quiet=True)
self.assertTrue(*self.timestamp_metadata())
def test_mtime(self):
# Test a change in mtime leads to a new .pyc.
self.recreation_check(struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
0, 1))
def test_magic_number(self):
        # Test a change in magic number leads to a new .pyc.
self.recreation_check(b'\0\0\0\0')
def test_compile_files(self):
# Test compiling a single file, and complete directory
for fn in (self.bc_path, self.bc_path2):
try:
os.unlink(fn)
except:
pass
self.assertTrue(compileall.compile_file(self.source_path,
force=False, quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
not os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
self.assertTrue(compileall.compile_dir(self.directory, force=False,
quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
os.unlink(self.bc_path2)
# Test against bad files
self.add_bad_source_file()
self.assertFalse(compileall.compile_file(self.bad_source_path,
force=False, quiet=2))
self.assertFalse(compileall.compile_dir(self.directory,
force=False, quiet=2))
def test_compile_file_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
# we should also test the output
with support.captured_stdout() as stdout:
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path)))
self.assertRegex(stdout.getvalue(), r'Compiling ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_file_pathlike_ddir(self):
self.assertFalse(os.path.isfile(self.bc_path))
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path),
ddir=pathlib.Path('ddir_path'),
quiet=2))
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_path(self):
with test.test_importlib.util.import_state(path=[self.directory]):
self.assertTrue(compileall.compile_path(quiet=2))
with test.test_importlib.util.import_state(path=[self.directory]):
self.add_bad_source_file()
self.assertFalse(compileall.compile_path(skip_curdir=False,
force=True, quiet=2))
def test_no_pycache_in_non_package(self):
# Bug 8563 reported that __pycache__ directories got created by
# compile_file() for non-.py files.
data_dir = os.path.join(self.directory, 'data')
data_file = os.path.join(data_dir, 'file')
os.mkdir(data_dir)
# touch data/file
with open(data_file, 'w'):
pass
compileall.compile_file(data_file)
self.assertFalse(os.path.exists(os.path.join(data_dir, '__pycache__')))
def test_optimize(self):
# make sure compiling with different optimization settings than the
# interpreter's creates the correct file names
optimize, opt = (1, 1) if __debug__ else (0, '')
compileall.compile_dir(self.directory, quiet=True, optimize=optimize)
cached = importlib.util.cache_from_source(self.source_path,
optimization=opt)
self.assertTrue(os.path.isfile(cached))
cached2 = importlib.util.cache_from_source(self.source_path2,
optimization=opt)
self.assertTrue(os.path.isfile(cached2))
cached3 = importlib.util.cache_from_source(self.source_path3,
optimization=opt)
self.assertTrue(os.path.isfile(cached3))
def test_compile_dir_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
with support.captured_stdout() as stdout:
compileall.compile_dir(pathlib.Path(self.directory))
line = stdout.getvalue().splitlines()[0]
self.assertRegex(line, r'Listing ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_pool_called(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(pool_mock.called)
def test_compile_workers_non_positive(self):
with self.assertRaisesRegex(ValueError,
"workers must be greater or equal to 0"):
compileall.compile_dir(self.directory, workers=-1)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_workers_cpu_count(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=0)
self.assertEqual(pool_mock.call_args[1]['max_workers'], None)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
@mock.patch('compileall.compile_file')
def test_compile_one_worker(self, compile_file_mock, pool_mock):
compileall.compile_dir(self.directory, quiet=True)
self.assertFalse(pool_mock.called)
self.assertTrue(compile_file_mock.called)
@mock.patch('concurrent.futures.ProcessPoolExecutor', new=None)
@mock.patch('compileall.compile_file')
def test_compile_missing_multiprocessing(self, compile_file_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(compile_file_mock.called)
def test_compile_dir_maxlevels(self):
# Test the actual impact of maxlevels parameter
depth = 3
path = self.directory
for i in range(1, depth + 1):
path = os.path.join(path, f"dir_{i}")
source = os.path.join(path, 'script.py')
os.mkdir(path)
shutil.copyfile(self.source_path, source)
pyc_filename = importlib.util.cache_from_source(source)
compileall.compile_dir(self.directory, quiet=True, maxlevels=depth - 1)
self.assertFalse(os.path.isfile(pyc_filename))
compileall.compile_dir(self.directory, quiet=True, maxlevels=depth)
self.assertTrue(os.path.isfile(pyc_filename))
def _test_ddir_only(self, *, ddir, parallel=True):
"""Recursive compile_dir ddir must contain package paths; bpo39769."""
fullpath = ["test", "foo"]
path = self.directory
mods = []
for subdir in fullpath:
path = os.path.join(path, subdir)
os.mkdir(path)
script_helper.make_script(path, "__init__", "")
mods.append(script_helper.make_script(path, "mod",
"def fn(): 1/0\nfn()\n"))
compileall.compile_dir(
self.directory, quiet=True, ddir=ddir,
workers=2 if parallel else 1)
self.assertTrue(mods)
for mod in mods:
self.assertTrue(mod.startswith(self.directory), mod)
modcode = importlib.util.cache_from_source(mod)
modpath = mod[len(self.directory+os.sep):]
_, _, err = script_helper.assert_python_failure(modcode)
expected_in = os.path.join(ddir, modpath)
mod_code_obj = test.test_importlib.util.get_code_from_pyc(modcode)
self.assertEqual(mod_code_obj.co_filename, expected_in)
self.assertIn(f'"{expected_in}"', os.fsdecode(err))
def test_ddir_only_one_worker(self):
"""Recursive compile_dir ddir= contains package paths; bpo39769."""
return self._test_ddir_only(ddir="<a prefix>", parallel=False)
def test_ddir_multiple_workers(self):
"""Recursive compile_dir ddir= contains package paths; bpo39769."""
return self._test_ddir_only(ddir="<a prefix>", parallel=True)
def test_ddir_empty_only_one_worker(self):
"""Recursive compile_dir ddir='' contains package paths; bpo39769."""
return self._test_ddir_only(ddir="", parallel=False)
def test_ddir_empty_multiple_workers(self):
"""Recursive compile_dir ddir='' contains package paths; bpo39769."""
return self._test_ddir_only(ddir="", parallel=True)
def test_strip_only(self):
fullpath = ["test", "build", "real", "path"]
path = os.path.join(self.directory, *fullpath)
os.makedirs(path)
script = script_helper.make_script(path, "test", "1 / 0")
bc = importlib.util.cache_from_source(script)
stripdir = os.path.join(self.directory, *fullpath[:2])
compileall.compile_dir(path, quiet=True, stripdir=stripdir)
rc, out, err = script_helper.assert_python_failure(bc)
expected_in = os.path.join(*fullpath[2:])
self.assertIn(
expected_in,
str(err, encoding=sys.getdefaultencoding())
)
self.assertNotIn(
stripdir,
str(err, encoding=sys.getdefaultencoding())
)
def test_prepend_only(self):
fullpath = ["test", "build", "real", "path"]
path = os.path.join(self.directory, *fullpath)
os.makedirs(path)
script = script_helper.make_script(path, "test", "1 / 0")
bc = importlib.util.cache_from_source(script)
prependdir = "/foo"
compileall.compile_dir(path, quiet=True, prependdir=prependdir)
rc, out, err = script_helper.assert_python_failure(bc)
expected_in = os.path.join(prependdir, self.directory, *fullpath)
self.assertIn(
expected_in,
str(err, encoding=sys.getdefaultencoding())
)
def test_strip_and_prepend(self):
fullpath = ["test", "build", "real", "path"]
path = os.path.join(self.directory, *fullpath)
os.makedirs(path)
script = script_helper.make_script(path, "test", "1 / 0")
bc = importlib.util.cache_from_source(script)
stripdir = os.path.join(self.directory, *fullpath[:2])
prependdir = "/foo"
compileall.compile_dir(path, quiet=True,
stripdir=stripdir, prependdir=prependdir)
rc, out, err = script_helper.assert_python_failure(bc)
expected_in = os.path.join(prependdir, *fullpath[2:])
self.assertIn(
expected_in,
str(err, encoding=sys.getdefaultencoding())
)
self.assertNotIn(
stripdir,
str(err, encoding=sys.getdefaultencoding())
)
def test_strip_prepend_and_ddir(self):
fullpath = ["test", "build", "real", "path", "ddir"]
path = os.path.join(self.directory, *fullpath)
os.makedirs(path)
script_helper.make_script(path, "test", "1 / 0")
with self.assertRaises(ValueError):
compileall.compile_dir(path, quiet=True, ddir="/bar",
stripdir="/foo", prependdir="/bar")
def test_multiple_optimization_levels(self):
script = script_helper.make_script(self.directory,
"test_optimization",
"a = 0")
bc = []
for opt_level in "", 1, 2, 3:
bc.append(importlib.util.cache_from_source(script,
optimization=opt_level))
test_combinations = [[0, 1], [1, 2], [0, 2], [0, 1, 2]]
for opt_combination in test_combinations:
compileall.compile_file(script, quiet=True,
optimize=opt_combination)
for opt_level in opt_combination:
self.assertTrue(os.path.isfile(bc[opt_level]))
try:
os.unlink(bc[opt_level])
except Exception:
pass
@support.skip_unless_symlink
def test_ignore_symlink_destination(self):
# Create folders for allowed files, symlinks and prohibited area
allowed_path = os.path.join(self.directory, "test", "dir", "allowed")
symlinks_path = os.path.join(self.directory, "test", "dir", "symlinks")
prohibited_path = os.path.join(self.directory, "test", "dir", "prohibited")
os.makedirs(allowed_path)
os.makedirs(symlinks_path)
os.makedirs(prohibited_path)
# Create scripts and symlinks and remember their byte-compiled versions
allowed_script = script_helper.make_script(allowed_path, "test_allowed", "a = 0")
prohibited_script = script_helper.make_script(prohibited_path, "test_prohibited", "a = 0")
allowed_symlink = os.path.join(symlinks_path, "test_allowed.py")
prohibited_symlink = os.path.join(symlinks_path, "test_prohibited.py")
os.symlink(allowed_script, allowed_symlink)
os.symlink(prohibited_script, prohibited_symlink)
allowed_bc = importlib.util.cache_from_source(allowed_symlink)
prohibited_bc = importlib.util.cache_from_source(prohibited_symlink)
compileall.compile_dir(symlinks_path, quiet=True, limit_sl_dest=allowed_path)
self.assertTrue(os.path.isfile(allowed_bc))
self.assertFalse(os.path.isfile(prohibited_bc))
class CompileallTestsWithSourceEpoch(CompileallTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class CompileallTestsWithoutSourceEpoch(CompileallTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
class EncodingTest(unittest.TestCase):
"""Issue 6716: compileall should escape source code when printing errors
to stdout."""
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
with open(self.source_path, 'w', encoding='utf-8') as file:
file.write('# -*- coding: utf-8 -*-\n')
file.write('print u"\u20ac"\n')
def tearDown(self):
shutil.rmtree(self.directory)
def test_error(self):
try:
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(),encoding='ascii')
compileall.compile_dir(self.directory)
finally:
sys.stdout = orig_stdout
class CommandLineTestsBase:
"""Test compileall's CLI."""
@classmethod
def setUpClass(cls):
for path in filter(os.path.isdir, sys.path):
directory_created = False
directory = pathlib.Path(path) / '__pycache__'
path = directory / 'test.try'
try:
if not directory.is_dir():
directory.mkdir()
directory_created = True
with path.open('w') as file:
file.write('# for test_compileall')
except OSError:
sys_path_writable = False
break
finally:
support.unlink(str(path))
if directory_created:
directory.rmdir()
else:
sys_path_writable = True
cls._sys_path_writable = sys_path_writable
def _skip_if_sys_path_not_writable(self):
if not self._sys_path_writable:
raise unittest.SkipTest('not all entries on sys.path are writable')
def _get_run_args(self, args):
return [*support.optim_args_from_interpreter_flags(),
'-S', '-m', 'compileall',
*args]
def assertRunOK(self, *args, **env_vars):
rc, out, err = script_helper.assert_python_ok(
*self._get_run_args(args), **env_vars)
self.assertEqual(b'', err)
return out
def assertRunNotOK(self, *args, **env_vars):
rc, out, err = script_helper.assert_python_failure(
*self._get_run_args(args), **env_vars)
return rc, out, err
def assertCompiled(self, fn):
path = importlib.util.cache_from_source(fn)
self.assertTrue(os.path.exists(path))
def assertNotCompiled(self, fn):
path = importlib.util.cache_from_source(fn)
self.assertFalse(os.path.exists(path))
def setUp(self):
self.directory = tempfile.mkdtemp()
self.addCleanup(support.rmtree, self.directory)
self.pkgdir = os.path.join(self.directory, 'foo')
os.mkdir(self.pkgdir)
self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
# Create the __init__.py and a package module.
self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
def test_no_args_compiles_path(self):
# Note that -l is implied for the no args case.
self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
self.assertRunOK(PYTHONPATH=self.directory)
self.assertCompiled(bazfn)
self.assertNotCompiled(self.initfn)
self.assertNotCompiled(self.barfn)
@without_source_date_epoch # timestamp invalidation test
def test_no_args_respects_force_flag(self):
self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
self.assertRunOK(PYTHONPATH=self.directory)
pycpath = importlib.util.cache_from_source(bazfn)
# Set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# Without force, no recompilation
self.assertRunOK(PYTHONPATH=self.directory)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# Now force it.
self.assertRunOK('-f', PYTHONPATH=self.directory)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_no_args_respects_quiet_flag(self):
self._skip_if_sys_path_not_writable()
script_helper.make_script(self.directory, 'baz', '')
noisy = self.assertRunOK(PYTHONPATH=self.directory)
self.assertIn(b'Listing ', noisy)
quiet = self.assertRunOK('-q', PYTHONPATH=self.directory)
self.assertNotIn(b'Listing ', quiet)
# Ensure that the default behavior of compileall's CLI is to create
# PEP 3147/PEP 488 pyc files.
for name, ext, switch in [
('normal', 'pyc', []),
('optimize', 'opt-1.pyc', ['-O']),
('doubleoptimize', 'opt-2.pyc', ['-OO']),
]:
def f(self, ext=ext, switch=switch):
script_helper.assert_python_ok(*(switch +
['-m', 'compileall', '-q', self.pkgdir]))
# Verify the __pycache__ directory contents.
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
expected = sorted(base.format(sys.implementation.cache_tag, ext)
for base in ('__init__.{}.{}', 'bar.{}.{}'))
self.assertEqual(sorted(os.listdir(self.pkgdir_cachedir)), expected)
# Make sure there are no .pyc files in the source directory.
self.assertFalse([fn for fn in os.listdir(self.pkgdir)
if fn.endswith(ext)])
locals()['test_pep3147_paths_' + name] = f
def test_legacy_paths(self):
# Ensure that with the proper switch, compileall leaves legacy
# pyc files, and no __pycache__ directory.
self.assertRunOK('-b', '-q', self.pkgdir)
# Verify the __pycache__ directory contents.
self.assertFalse(os.path.exists(self.pkgdir_cachedir))
expected = sorted(['__init__.py', '__init__.pyc', 'bar.py',
'bar.pyc'])
self.assertEqual(sorted(os.listdir(self.pkgdir)), expected)
def test_multiple_runs(self):
# Bug 8527 reported that multiple calls produced empty
# __pycache__/__pycache__ directories.
self.assertRunOK('-q', self.pkgdir)
# Verify the __pycache__ directory contents.
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
cachecachedir = os.path.join(self.pkgdir_cachedir, '__pycache__')
self.assertFalse(os.path.exists(cachecachedir))
# Call compileall again.
self.assertRunOK('-q', self.pkgdir)
self.assertTrue(os.path.exists(self.pkgdir_cachedir))
self.assertFalse(os.path.exists(cachecachedir))
@without_source_date_epoch # timestamp invalidation test
def test_force(self):
self.assertRunOK('-q', self.pkgdir)
pycpath = importlib.util.cache_from_source(self.barfn)
# set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# without force, no recompilation
self.assertRunOK('-q', self.pkgdir)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# now force it.
self.assertRunOK('-q', '-f', self.pkgdir)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_recursion_control(self):
subpackage = os.path.join(self.pkgdir, 'spam')
os.mkdir(subpackage)
subinitfn = script_helper.make_script(subpackage, '__init__', '')
hamfn = script_helper.make_script(subpackage, 'ham', '')
self.assertRunOK('-q', '-l', self.pkgdir)
self.assertNotCompiled(subinitfn)
self.assertFalse(os.path.exists(os.path.join(subpackage, '__pycache__')))
self.assertRunOK('-q', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
def test_recursion_limit(self):
subpackage = os.path.join(self.pkgdir, 'spam')
subpackage2 = os.path.join(subpackage, 'ham')
subpackage3 = os.path.join(subpackage2, 'eggs')
for pkg in (subpackage, subpackage2, subpackage3):
script_helper.make_pkg(pkg)
subinitfn = os.path.join(subpackage, '__init__.py')
hamfn = script_helper.make_script(subpackage, 'ham', '')
spamfn = script_helper.make_script(subpackage2, 'spam', '')
eggfn = script_helper.make_script(subpackage3, 'egg', '')
self.assertRunOK('-q', '-r 0', self.pkgdir)
self.assertNotCompiled(subinitfn)
self.assertFalse(
os.path.exists(os.path.join(subpackage, '__pycache__')))
self.assertRunOK('-q', '-r 1', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertNotCompiled(spamfn)
self.assertRunOK('-q', '-r 2', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertCompiled(spamfn)
self.assertNotCompiled(eggfn)
self.assertRunOK('-q', '-r 5', self.pkgdir)
self.assertCompiled(subinitfn)
self.assertCompiled(hamfn)
self.assertCompiled(spamfn)
self.assertCompiled(eggfn)
@support.skip_unless_symlink
def test_symlink_loop(self):
# Currently, compileall ignores symlinks to directories.
# If that limitation is ever lifted, it should protect against
# recursion in symlink loops.
pkg = os.path.join(self.pkgdir, 'spam')
script_helper.make_pkg(pkg)
os.symlink('.', os.path.join(pkg, 'evil'))
os.symlink('.', os.path.join(pkg, 'evil2'))
self.assertRunOK('-q', self.pkgdir)
self.assertCompiled(os.path.join(
self.pkgdir, 'spam', 'evil', 'evil2', '__init__.py'
))
def test_quiet(self):
noisy = self.assertRunOK(self.pkgdir)
quiet = self.assertRunOK('-q', self.pkgdir)
self.assertNotEqual(b'', noisy)
self.assertEqual(b'', quiet)
def test_silent(self):
script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
_, quiet, _ = self.assertRunNotOK('-q', self.pkgdir)
_, silent, _ = self.assertRunNotOK('-qq', self.pkgdir)
self.assertNotEqual(b'', quiet)
self.assertEqual(b'', silent)
def test_regexp(self):
self.assertRunOK('-q', '-x', r'ba[^\\/]*$', self.pkgdir)
self.assertNotCompiled(self.barfn)
self.assertCompiled(self.initfn)
def test_multiple_dirs(self):
pkgdir2 = os.path.join(self.directory, 'foo2')
os.mkdir(pkgdir2)
init2fn = script_helper.make_script(pkgdir2, '__init__', '')
bar2fn = script_helper.make_script(pkgdir2, 'bar2', '')
self.assertRunOK('-q', self.pkgdir, pkgdir2)
self.assertCompiled(self.initfn)
self.assertCompiled(self.barfn)
self.assertCompiled(init2fn)
self.assertCompiled(bar2fn)
def test_d_compile_error(self):
script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir)
self.assertRegex(out, b'File "dinsdale')
def test_d_runtime_error(self):
bazfn = script_helper.make_script(self.pkgdir, 'baz', 'raise Exception')
self.assertRunOK('-q', '-d', 'dinsdale', self.pkgdir)
fn = script_helper.make_script(self.pkgdir, 'bing', 'import baz')
pyc = importlib.util.cache_from_source(bazfn)
os.rename(pyc, os.path.join(self.pkgdir, 'baz.pyc'))
os.remove(bazfn)
rc, out, err = script_helper.assert_python_failure(fn, __isolated=False)
self.assertRegex(err, b'File "dinsdale')
def test_include_bad_file(self):
rc, out, err = self.assertRunNotOK(
'-i', os.path.join(self.directory, 'nosuchfile'), self.pkgdir)
self.assertRegex(out, b'rror.*nosuchfile')
self.assertNotRegex(err, b'Traceback')
self.assertFalse(os.path.exists(importlib.util.cache_from_source(
self.pkgdir_cachedir)))
def test_include_file_with_arg(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
with open(os.path.join(self.directory, 'l1'), 'w') as l1:
l1.write(os.path.join(self.pkgdir, 'f1.py')+os.linesep)
l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
self.assertRunOK('-i', os.path.join(self.directory, 'l1'), f4)
self.assertCompiled(f1)
self.assertCompiled(f2)
self.assertNotCompiled(f3)
self.assertCompiled(f4)
def test_include_file_no_arg(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
with open(os.path.join(self.directory, 'l1'), 'w') as l1:
l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
self.assertRunOK('-i', os.path.join(self.directory, 'l1'))
self.assertNotCompiled(f1)
self.assertCompiled(f2)
self.assertNotCompiled(f3)
self.assertNotCompiled(f4)
def test_include_on_stdin(self):
f1 = script_helper.make_script(self.pkgdir, 'f1', '')
f2 = script_helper.make_script(self.pkgdir, 'f2', '')
f3 = script_helper.make_script(self.pkgdir, 'f3', '')
f4 = script_helper.make_script(self.pkgdir, 'f4', '')
p = script_helper.spawn_python(*(self._get_run_args(()) + ['-i', '-']))
p.stdin.write((f3+os.linesep).encode('ascii'))
script_helper.kill_python(p)
self.assertNotCompiled(f1)
self.assertNotCompiled(f2)
self.assertCompiled(f3)
self.assertNotCompiled(f4)
def test_compiles_as_much_as_possible(self):
bingfn = script_helper.make_script(self.pkgdir, 'bing', 'syntax(error')
rc, out, err = self.assertRunNotOK('nosuchfile', self.initfn,
bingfn, self.barfn)
self.assertRegex(out, b'rror')
self.assertNotCompiled(bingfn)
self.assertCompiled(self.initfn)
self.assertCompiled(self.barfn)
def test_invalid_arg_produces_message(self):
out = self.assertRunOK('badfilename')
self.assertRegex(out, b"Can't list 'badfilename'")
def test_pyc_invalidation_mode(self):
script_helper.make_script(self.pkgdir, 'f1', '')
pyc = importlib.util.cache_from_source(
os.path.join(self.pkgdir, 'f1.py'))
self.assertRunOK('--invalidation-mode=checked-hash', self.pkgdir)
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11)
self.assertRunOK('--invalidation-mode=unchecked-hash', self.pkgdir)
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b01)
@skipUnless(_have_multiprocessing, "requires multiprocessing")
def test_workers(self):
bar2fn = script_helper.make_script(self.directory, 'bar2', '')
files = []
for suffix in range(5):
pkgdir = os.path.join(self.directory, 'foo{}'.format(suffix))
os.mkdir(pkgdir)
fn = script_helper.make_script(pkgdir, '__init__', '')
files.append(script_helper.make_script(pkgdir, 'bar2', ''))
self.assertRunOK(self.directory, '-j', '0')
self.assertCompiled(bar2fn)
for file in files:
self.assertCompiled(file)
@mock.patch('compileall.compile_dir')
def test_workers_available_cores(self, compile_dir):
with mock.patch("sys.argv",
new=[sys.executable, self.directory, "-j0"]):
compileall.main()
self.assertTrue(compile_dir.called)
self.assertEqual(compile_dir.call_args[-1]['workers'], 0)
def test_strip_and_prepend(self):
fullpath = ["test", "build", "real", "path"]
path = os.path.join(self.directory, *fullpath)
os.makedirs(path)
script = script_helper.make_script(path, "test", "1 / 0")
bc = importlib.util.cache_from_source(script)
stripdir = os.path.join(self.directory, *fullpath[:2])
prependdir = "/foo"
self.assertRunOK("-s", stripdir, "-p", prependdir, path)
rc, out, err = script_helper.assert_python_failure(bc)
expected_in = os.path.join(prependdir, *fullpath[2:])
self.assertIn(
expected_in,
str(err, encoding=sys.getdefaultencoding())
)
self.assertNotIn(
stripdir,
str(err, encoding=sys.getdefaultencoding())
)
def test_multiple_optimization_levels(self):
path = os.path.join(self.directory, "optimizations")
os.makedirs(path)
script = script_helper.make_script(path,
"test_optimization",
"a = 0")
bc = []
for opt_level in "", 1, 2, 3:
bc.append(importlib.util.cache_from_source(script,
optimization=opt_level))
test_combinations = [["0", "1"],
["1", "2"],
["0", "2"],
["0", "1", "2"]]
for opt_combination in test_combinations:
self.assertRunOK(path, *("-o" + str(n) for n in opt_combination))
for opt_level in opt_combination:
self.assertTrue(os.path.isfile(bc[int(opt_level)]))
try:
os.unlink(bc[opt_level])
except Exception:
pass
@support.skip_unless_symlink
def test_ignore_symlink_destination(self):
# Create folders for allowed files, symlinks and prohibited area
allowed_path = os.path.join(self.directory, "test", "dir", "allowed")
symlinks_path = os.path.join(self.directory, "test", "dir", "symlinks")
prohibited_path = os.path.join(self.directory, "test", "dir", "prohibited")
os.makedirs(allowed_path)
os.makedirs(symlinks_path)
os.makedirs(prohibited_path)
# Create scripts and symlinks and remember their byte-compiled versions
allowed_script = script_helper.make_script(allowed_path, "test_allowed", "a = 0")
prohibited_script = script_helper.make_script(prohibited_path, "test_prohibited", "a = 0")
allowed_symlink = os.path.join(symlinks_path, "test_allowed.py")
prohibited_symlink = os.path.join(symlinks_path, "test_prohibited.py")
os.symlink(allowed_script, allowed_symlink)
os.symlink(prohibited_script, prohibited_symlink)
allowed_bc = importlib.util.cache_from_source(allowed_symlink)
prohibited_bc = importlib.util.cache_from_source(prohibited_symlink)
self.assertRunOK(symlinks_path, "-e", allowed_path)
self.assertTrue(os.path.isfile(allowed_bc))
self.assertFalse(os.path.isfile(prohibited_bc))
def test_hardlink_bad_args(self):
# Bad arguments combination, hardlink deduplication make sense
# only for more than one optimization level
self.assertRunNotOK(self.directory, "-o 1", "--hardlink-dupes")
def test_hardlink(self):
# 'a = 0' code produces the same bytecode for the 3 optimization
# levels. All three .pyc files must have the same inode (hardlinks).
#
# If deduplication is disabled, all pyc files must have different
# inodes.
for dedup in (True, False):
with tempfile.TemporaryDirectory() as path:
with self.subTest(dedup=dedup):
script = script_helper.make_script(path, "script", "a = 0")
pycs = get_pycs(script)
args = ["-q", "-o 0", "-o 1", "-o 2"]
if dedup:
args.append("--hardlink-dupes")
self.assertRunOK(path, *args)
self.assertEqual(is_hardlink(pycs[0], pycs[1]), dedup)
self.assertEqual(is_hardlink(pycs[1], pycs[2]), dedup)
self.assertEqual(is_hardlink(pycs[0], pycs[2]), dedup)
class CommandLineTestsWithSourceEpoch(CommandLineTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class CommandLineTestsNoSourceEpoch(CommandLineTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
class HardlinkDedupTestsBase:
# Test hardlink_dupes parameter of compileall.compile_dir()
def setUp(self):
self.path = None
@contextlib.contextmanager
def temporary_directory(self):
with tempfile.TemporaryDirectory() as path:
self.path = path
yield path
self.path = None
def make_script(self, code, name="script"):
return script_helper.make_script(self.path, name, code)
def compile_dir(self, *, dedup=True, optimize=(0, 1, 2), force=False):
compileall.compile_dir(self.path, quiet=True, optimize=optimize,
hardlink_dupes=dedup, force=force)
def test_bad_args(self):
        # Bad argument combination: hardlink deduplication only makes sense
        # when more than one optimization level is requested
with self.temporary_directory():
self.make_script("pass")
with self.assertRaises(ValueError):
compileall.compile_dir(self.path, quiet=True, optimize=0,
hardlink_dupes=True)
with self.assertRaises(ValueError):
# same optimization level specified twice:
# compile_dir() removes duplicates
compileall.compile_dir(self.path, quiet=True, optimize=[0, 0],
hardlink_dupes=True)
def create_code(self, docstring=False, assertion=False):
lines = []
if docstring:
lines.append("'module docstring'")
lines.append('x = 1')
if assertion:
lines.append("assert x == 1")
return '\n'.join(lines)
def iter_codes(self):
for docstring in (False, True):
for assertion in (False, True):
code = self.create_code(docstring=docstring, assertion=assertion)
yield (code, docstring, assertion)
def test_disabled(self):
# Deduplication disabled, no hardlinks
for code, docstring, assertion in self.iter_codes():
with self.subTest(docstring=docstring, assertion=assertion):
with self.temporary_directory():
script = self.make_script(code)
pycs = get_pycs(script)
self.compile_dir(dedup=False)
self.assertFalse(is_hardlink(pycs[0], pycs[1]))
self.assertFalse(is_hardlink(pycs[0], pycs[2]))
self.assertFalse(is_hardlink(pycs[1], pycs[2]))
def check_hardlinks(self, script, docstring=False, assertion=False):
pycs = get_pycs(script)
self.assertEqual(is_hardlink(pycs[0], pycs[1]),
not assertion)
self.assertEqual(is_hardlink(pycs[0], pycs[2]),
not assertion and not docstring)
self.assertEqual(is_hardlink(pycs[1], pycs[2]),
not docstring)
def test_hardlink(self):
# Test deduplication on all combinations
for code, docstring, assertion in self.iter_codes():
with self.subTest(docstring=docstring, assertion=assertion):
with self.temporary_directory():
script = self.make_script(code)
self.compile_dir()
self.check_hardlinks(script, docstring, assertion)
def test_only_two_levels(self):
# Don't build the 3 optimization levels, but only 2
for opts in ((0, 1), (1, 2), (0, 2)):
with self.subTest(opts=opts):
with self.temporary_directory():
                    # code with no docstring and no assertion:
# same bytecode for all optimization levels
script = self.make_script(self.create_code())
self.compile_dir(optimize=opts)
pyc1 = get_pyc(script, opts[0])
pyc2 = get_pyc(script, opts[1])
self.assertTrue(is_hardlink(pyc1, pyc2))
def test_duplicated_levels(self):
# compile_dir() must not fail if optimize contains duplicated
# optimization levels and/or if optimization levels are not sorted.
with self.temporary_directory():
            # code with no docstring and no assertion:
# same bytecode for all optimization levels
script = self.make_script(self.create_code())
self.compile_dir(optimize=[1, 0, 1, 0])
pyc1 = get_pyc(script, 0)
pyc2 = get_pyc(script, 1)
self.assertTrue(is_hardlink(pyc1, pyc2))
def test_recompilation(self):
# Test compile_dir() when pyc files already exists and the script
# content changed
with self.temporary_directory():
script = self.make_script("a = 0")
self.compile_dir()
# All three levels have the same inode
self.check_hardlinks(script)
pycs = get_pycs(script)
inode = os.stat(pycs[0]).st_ino
# Change of the module content
script = self.make_script("print(0)")
# Recompilation without -o 1
self.compile_dir(optimize=[0, 2], force=True)
# opt-1.pyc should have the same inode as before and others should not
self.assertEqual(inode, os.stat(pycs[1]).st_ino)
self.assertTrue(is_hardlink(pycs[0], pycs[2]))
self.assertNotEqual(inode, os.stat(pycs[2]).st_ino)
# opt-1.pyc and opt-2.pyc have different content
self.assertFalse(filecmp.cmp(pycs[1], pycs[2], shallow=True))
def test_import(self):
# Test that import updates a single pyc file when pyc files already
# exists and the script content changed
with self.temporary_directory():
script = self.make_script(self.create_code(), name="module")
self.compile_dir()
# All three levels have the same inode
self.check_hardlinks(script)
pycs = get_pycs(script)
inode = os.stat(pycs[0]).st_ino
# Change of the module content
script = self.make_script("print(0)", name="module")
# Import the module in Python with -O (optimization level 1)
script_helper.assert_python_ok(
"-O", "-c", "import module", __isolated=False, PYTHONPATH=self.path
)
# Only opt-1.pyc is changed
self.assertEqual(inode, os.stat(pycs[0]).st_ino)
self.assertEqual(inode, os.stat(pycs[2]).st_ino)
self.assertFalse(is_hardlink(pycs[1], pycs[2]))
# opt-1.pyc and opt-2.pyc have different content
self.assertFalse(filecmp.cmp(pycs[1], pycs[2], shallow=True))
class HardlinkDedupTestsWithSourceEpoch(HardlinkDedupTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class HardlinkDedupTestsNoSourceEpoch(HardlinkDedupTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
if __name__ == "__main__":
unittest.main()
|
py | b4061b9e0402d8cad05a5b4d55c52b4b1fa11eca | import logging
from urllib.parse import urlparse
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import StatementReference, User, Statement, Issue
from dbas.input_validator import is_integer
from dbas.lib import get_profile_picture, get_enabled_arguments_as_query, \
get_enabled_premises_as_query
LOG = logging.getLogger(__name__)
def get_references_for_argument(uid, main_page):
"""
Returns all references for the premises group of given argument
:param uid: uid of the argument
:param main_page: current overview page
:return: dict
"""
LOG.debug("%s", uid)
if not is_integer(uid):
return {}, {}
db_arguments = get_enabled_arguments_as_query()
db_argument = db_arguments.filter_by(uid=uid).first()
if not db_argument:
return {}, {}
db_premises = get_enabled_premises_as_query()
db_premises = db_premises.filter_by(premisegroup_uid=db_argument.premisegroup_uid).all()
data = {}
text = {}
for premise in db_premises:
tmp_uid = premise.statement_uid
references_array = __get_references_for_statement(tmp_uid, main_page)[tmp_uid]
data[premise.statement_uid] = references_array
text[premise.statement_uid] = premise.get_text()
if db_argument.conclusion_uid is not None:
tmp_uid = db_argument.conclusion_uid
references_array = __get_references_for_statement(tmp_uid, main_page)[tmp_uid]
data[tmp_uid] = references_array
db_statement = DBDiscussionSession.query(Statement).get(tmp_uid)
text[tmp_uid] = db_statement.get_text()
else:
d, t = get_references_for_argument(db_argument.argument_uid, main_page)
data.update(d)
text.update(t)
return data, text
def get_references_for_statements(uids, main_page):
"""
Returns all references for the current given statements
:param uids: uids of the statement
:param main_page: current overview page
:return: dict
"""
data = {}
text = {}
for uid in uids:
references_array = __get_references_for_statement(uid, main_page)[uid]
data[uid] = references_array
db_statement = DBDiscussionSession.query(Statement).get(uid)
text[uid] = db_statement.get_text()
return data, text
def __get_references_for_statement(uid, main_page):
"""
Returns all references for the current given statement
:param uid: uid of the statement
:param main_page: current overview page
:return: dict
"""
LOG.debug("%s", uid)
db_references = DBDiscussionSession.query(StatementReference).filter_by(statement_uid=uid).all()
references_array = [__get_values_of_reference(ref, main_page) for ref in db_references]
return {uid: references_array}
def __get_values_of_reference(reference: StatementReference, main_page) -> dict:
"""
Creates dictionary with all values of the column
:param reference: Current database row
:param main_page: current overview page
:return: Dictionary with all columns
"""
user: User = DBDiscussionSession.query(User).get(int(reference.author_uid))
img_path: str = get_profile_picture(user, 20, True)
name: str = user.global_nickname
link: str = main_page + '/user/' + str(user.uid)
return {'uid': reference.uid,
'reference': reference.text,
'host': reference.host,
'path': reference.path,
'author': {'img': img_path,
'name': name,
'link': link},
'created': str(reference.created.humanize),
'statement_text': reference.get_statement_text()}
def set_reference(text: str, url: str, user: User, statement: Statement, issue: Issue) -> bool:
"""
Creates a new reference for a statement.
:param issue: The issue of the referenced statement.
:param text: Text of the reference.
:param url: The url for the reference.
:param user: User who referenced the statement.
:param statement: Statement which should be referenced.
:return: Boolean
"""
    parsed_url = urlparse(url)
host: str = '{}://{}'.format(parsed_url.scheme, parsed_url.netloc)
path: str = '{}?{}'.format(parsed_url.path, parsed_url.query)
DBDiscussionSession.add(StatementReference(text, host, path, user, statement, issue))
DBDiscussionSession.flush()
return True
def get_references(uids, is_argument, application_url) -> dict:
"""
Returns references for an argument or statement.
:param uids: IDs of statements or arguments as list
:param is_argument: boolean if the ids are for arguments
:param application_url: url of the application
:rtype: dict
:return: prepared collection with error, data and text field
"""
if is_argument:
data, text = get_references_for_argument(uids, application_url)
else:
data, text = get_references_for_statements(uids, application_url)
return {
'data': data,
'text': text
}
|
py | b4061c471954663b4f1050d62f3086c87b0a5574 |
from __future__ import division
import math
def addition(a, b):
a = int(a)
b = int(b)
c = b + a
return c
def subtraction(a, b):
a = int(a)
b = int(b)
c = b - a
return c
def multiplication(a, b):
a = int(a)
b = int(b)
c = b * a
return c
def division(a, b):
a = int(a)
b = int(b)
c = float(b / a)
return round(c, 9)
def square(a):
a = int(a)
c = a ** 2
return c
def squareroot(a):
a = int(a)
c = math.sqrt(a)
return float(format(c, '.8f'))
class Calculator:
result = 0
def __init__(self):
pass
def add(self, a, b):
self.result = addition(a, b)
return self.result
def subtract(self, a, b):
self.result = subtraction(a, b)
return self.result
def multiple(self, a, b):
self.result = multiplication(a, b)
return self.result
def div(self, a, b):
self.result = division(a, b)
return self.result
def square(self, a):
self.result = square(a)
return self.result
def sqrt(self, a):
self.result = squareroot(a)
return self.result
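# Illustrative usage (not part of the original module). Note the argument order
# of the helpers: the second argument is the left operand, so subtraction(a, b)
# computes b - a and division(a, b) computes b / a.
#
#   calc = Calculator()
#   calc.add(2, 3)       # -> 5
#   calc.subtract(2, 5)  # -> 3    (5 - 2)
#   calc.div(4, 8)       # -> 2.0  (8 / 4)
#   calc.sqrt(9)         # -> 3.0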
|
py | b4061c8f9c5eec896a9f5882989e914051c70dfb | from poodle import Object
import operator as op
from functools import reduce
# Python 3 helpers to calculate nCr and nPr
import math
class Combinations(Object):
number: int
combinations: int
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
self.number = 0
self.combinations = 0
def __str__(self): return str(self.combinations)
def ncr(n, r):
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer / denom
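# Illustrative check (not in the original source): ncr(5, 2) == 10.0 -- the
# true division returns a float, hence the int(...) cast where it is used below.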
combinations_list = []
for i in range(10):
combinations0 = Combinations(i)
combinations0.number = i
combinations0.combinations = int(ncr(i,2))
combinations_list.append(combinations0)
class Permutations(Object):
number: int
permutations: int
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
self.number = 0
self.permutations = 0
def __str__(self): return str(self.permutations)
def fact(n):
if (n <= 1):
return 1
return n * fact(n - 1)
def nPr(n, r):
return math.floor(fact(n) /
fact(n - r))
permutation_list = []
for i in range(10):
permutation0 = Permutations(i)
permutation0.number = i
permutation0.permutations = int(nPr(i,2))
permutation_list.append(permutation0) |
py | b4061d2850eeedc8cd8217cde9fcb41730047466 | """
Initialization module to define constructor classes/ agent implementations in the 'Agents' scope.
To add a new neural network, add a key to the AlphaZeroNetworks or MuZeroNetworks dictionary whose value is the
class reference that constructs the neural network (see the commented example after the dictionaries below).
"""
from .GymNetwork import AlphaZeroGymNetwork, MuZeroGymNetwork
from .AtariNetwork import AlphaZeroAtariNetwork, MuZeroAtariNetwork
from .HexNetwork import AlphaZeroHexNetwork, MuZeroHexNetwork
from .Player import Player, ManualPlayer, RandomPlayer, DeterministicPlayer, \
DefaultMuZeroPlayer, DefaultAlphaZeroPlayer, BlindMuZeroPlayer
# Add your AlphaZero neural network architecture here by referencing the imported Class with a string key.
AlphaZeroNetworks = {
'Hex': AlphaZeroHexNetwork,
'Othello': AlphaZeroHexNetwork,
'Gym': AlphaZeroGymNetwork,
"Atari": AlphaZeroAtariNetwork
}
# Add your MuZero neural network architecture here by referencing the imported Class with a string key.
MuZeroNetworks = {
'Hex': MuZeroHexNetwork,
'Othello': MuZeroHexNetwork,
'Gym': MuZeroGymNetwork,
'Atari': MuZeroAtariNetwork
}
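# Illustrative registration of a custom architecture (the names below are
# assumptions, not part of this repository): import the class and add it under
# a new key so the factories above can construct it by that key.
#
#   from .MyNetwork import AlphaZeroMyNetwork, MuZeroMyNetwork
#   AlphaZeroNetworks['MyGame'] = AlphaZeroMyNetwork
#   MuZeroNetworks['MyGame'] = MuZeroMyNetwork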
# Add different agent implementations for interacting with environments.
Players = {
"ALPHAZERO": DefaultAlphaZeroPlayer,
"MUZERO": DefaultMuZeroPlayer,
"BLIND_MUZERO": BlindMuZeroPlayer,
"RANDOM": RandomPlayer,
"DETERMINISTIC": DeterministicPlayer,
"MANUAL": ManualPlayer
}
|
py | b4061da1377bd335886fd28ffe6685fb85ec7825 | import os, shutil
original_dataset_dir = 'blog/dogs_cats/data/train'
base_dir = 'blog/dogs_cats/data'
if not os.path.exists(base_dir):
os.mkdir(base_dir)
# Create directories
train_dir = os.path.join(base_dir,'train')
if not os.path.exists(train_dir):
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir,'validation')
if not os.path.exists(validation_dir):
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir,'test')
if not os.path.exists(test_dir):
os.mkdir(test_dir)
train_cats_dir = os.path.join(train_dir,'cats')
if not os.path.exists(train_cats_dir):
os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir,'dogs')
if not os.path.exists(train_dogs_dir):
os.mkdir(train_dogs_dir)
validation_cats_dir = os.path.join(validation_dir,'cats')
if not os.path.exists(validation_cats_dir):
os.mkdir(validation_cats_dir)
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
if not os.path.exists(validation_dogs_dir):
os.mkdir(validation_dogs_dir)
test_cats_dir = os.path.join(test_dir, 'cats')
if not os.path.exists(test_cats_dir):
os.mkdir(test_cats_dir)
test_dogs_dir = os.path.join(test_dir, 'dogs')
if not os.path.exists(test_dogs_dir):
os.mkdir(test_dogs_dir)
# Copy the first 100 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(100)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy cat images 200-249 to validation_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(200, 250)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy cat images 250-299 to test_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(250,300)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy the first 100 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(100)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy dog images 200-249 to validation_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(200,250)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy dog images 250-299 to test_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(250,300)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
# Sanity checks
print('total training cat images:', len(os.listdir(train_cats_dir)))
print('total training dog images:', len(os.listdir(train_dogs_dir)))
print('total validation cat images:', len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
print('total test cat images:', len(os.listdir(test_cats_dir)))
print('total test dog images:', len(os.listdir(test_dogs_dir)))
|
py | b4061df4bbdba99c4aa8e30c9c0ce78cfa7e0a66 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v5.enums.types import tracking_code_page_format
from google.ads.googleads.v5.enums.types import tracking_code_type
__protobuf__ = proto.module(
package="google.ads.googleads.v5.common",
marshal="google.ads.googleads.v5",
manifest={"TagSnippet",},
)
class TagSnippet(proto.Message):
r"""The site tag and event snippet pair for a TrackingCodeType.
Attributes:
type_ (google.ads.googleads.v5.enums.types.TrackingCodeTypeEnum.TrackingCodeType):
The type of the generated tag snippets for
tracking conversions.
page_format (google.ads.googleads.v5.enums.types.TrackingCodePageFormatEnum.TrackingCodePageFormat):
The format of the web page where the tracking
tag and snippet will be installed, e.g. HTML.
global_site_tag (str):
The site tag that adds visitors to your basic
remarketing lists and sets new cookies on your
domain.
event_snippet (str):
The event snippet that works with the site
tag to track actions that should be counted as
conversions.
"""
type_ = proto.Field(
proto.ENUM,
number=1,
enum=tracking_code_type.TrackingCodeTypeEnum.TrackingCodeType,
)
page_format = proto.Field(
proto.ENUM,
number=2,
enum=tracking_code_page_format.TrackingCodePageFormatEnum.TrackingCodePageFormat,
)
global_site_tag = proto.Field(proto.STRING, number=5, optional=True)
event_snippet = proto.Field(proto.STRING, number=6, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
|
py | b4061e7ac6e731381c5d363a58b4a539a90f7b83 | import sys
from sklearn.svm import SVC
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
import re
from sklearn.metrics import accuracy_score
import numpy as np
def main(args):
(training_file, label_file, test_file, test_label, u_file) = args
X_training = load_feat(training_file)
n = len(X_training)
U = load_feat(u_file)
y_training = [int(line.strip()) for line in open(label_file)]
U = np.asarray(U)
X_training = np.asarray(X_training)
#X = preprocessing.normalize(X, norm='l2')
y_training = np.asarray(y_training)
X_test = load_feat(test_file)
y_test = [int(line.strip()) for line in open(test_label)]
X_test = np.asarray(X_test)
#test_X = preprocessing.normalize(test_X, norm='l2')
y_test = np.asarray(y_test)
cca = CCA(n_components=100)
(X_cca, U_cca) = cca.fit_transform(X_training, U[:n])
X_test_cca = cca.predict(X_test)
svr = SVC()
svr.fit(X_cca, y_training)
pred = svr.predict(X_test_cca)
    print(pred)
    print(y_test)
    print(accuracy_score(y_test, pred))
with open(test_file + '.cca.2.pred', 'w') as output:
for p in pred:
            print(p, file=output)
#svm_model.fit(X, y)
#pickle.dump(lr, open(model_file, "wb"))
return
return
def load_feat(feat_file):
X = []
with open(feat_file) as feat:
for line in feat:
cols = re.split('\s+', line.strip())
features = [float(i) for i in cols]
X.append(features)
return X
if __name__ == '__main__':
if len(sys.argv) != 6:
        print('usage: python ccaQEpy <training-features> <training-label> <test-features> <test-labels> <U-file>')
sys.exit(1)
else:
main(sys.argv[1:])
|
py | b4061e97c9f1948dc150935b5d6f32a71bd006e9 | import time
import json
import pandas as pd
import re
import sys
from decimal import Decimal
from itertools import islice
from models.PyCryptoBot import PyCryptoBot
from models.helper.TelegramBotHelper import TelegramBotHelper as TGBot
from models.exchange.binance import PublicAPI as BPublicAPI
from models.exchange.coinbase_pro import PublicAPI as CPublicAPI
from models.exchange.kucoin import PublicAPI as KPublicAPI
from models.exchange.Granularity import Granularity
from models.exchange.ExchangesEnum import Exchange as CryptoExchange
from tradingview_ta import *
from importlib.metadata import version
def volatility_calculator(bollinger_band_upper, bollinger_band_lower, keltner_upper, keltner_lower, high, low):
"""
A break away from traditional volatility calculations. Based entirely
on the proportionate price gap between keltner channels, bolinger, and high / low averaged out
"""
try:
b_spread = Decimal(bollinger_band_upper) - Decimal(bollinger_band_lower)
k_spread = Decimal(keltner_upper) - Decimal(keltner_lower)
p_spread = Decimal(high) - Decimal(low)
except TypeError:
return 0
b_pcnt = abs(b_spread / Decimal(bollinger_band_lower)) * 100
k_pcnt = abs(k_spread / Decimal(keltner_lower)) * 100
chan_20_pcnt = (b_pcnt + k_pcnt) / 2
p_pcnt = abs(p_spread / Decimal(low)) * 100
return abs((chan_20_pcnt + p_pcnt) / 2)
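# Worked example (illustrative numbers): Bollinger bands of 110/100 give a 10%
# spread, Keltner bands of 108/102 give ~5.88%, and a 109/101 high/low gives
# ~7.92%; averaging the channel mean (~7.94%) with the price spread yields a
# volatility of roughly 7.93 (a percentage figure, returned as a Decimal).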
def load_configs():
exchanges_loaded = []
try:
with open("screener.json", encoding='utf8') as json_file:
config = json.load(json_file)
except IOError as err:
raise(err)
try:
for exchange in config:
ex = CryptoExchange(exchange)
exchange_config = config[ex.value]
if ex == CryptoExchange.BINANCE:
binance_app = PyCryptoBot(exchange=ex)
binance_app.public_api = BPublicAPI()
binance_app.scanner_quote_currencies = exchange_config.get('quote_currency', ['USDT'])
binance_app.granularity = Granularity(Granularity.convert_to_enum(exchange_config.get('granularity', '1h')))
binance_app.adx_threshold = exchange_config.get('adx_threshold', 25)
binance_app.volatility_threshold = exchange_config.get('volatility_threshold', 9)
binance_app.minimum_volatility = exchange_config.get('minimum_volatility', 5)
binance_app.minimum_volume = exchange_config.get('minimum_volume', 20000)
binance_app.volume_threshold = exchange_config.get('volume_threshold', 20000)
binance_app.minimum_quote_price = exchange_config.get('minimum_quote_price', 0.0000001)
binance_app.selection_score = exchange_config.get('selection_score', 10)
binance_app.tv_screener_ratings = [rating.upper() for rating in exchange_config.get('tv_screener_ratings', ['STRONG_BUY'])]
exchanges_loaded.append(binance_app)
elif ex == CryptoExchange.COINBASEPRO:
coinbase_app = PyCryptoBot(exchange=ex)
coinbase_app.public_api = CPublicAPI()
coinbase_app.scanner_quote_currencies = exchange_config.get('quote_currency', ['USDT'])
coinbase_app.granularity = Granularity(Granularity.convert_to_enum(int(exchange_config.get('granularity', '3600'))))
coinbase_app.adx_threshold = exchange_config.get('adx_threshold', 25)
coinbase_app.volatility_threshold = exchange_config.get('volatility_threshold', 9)
coinbase_app.minimum_volatility = exchange_config.get('minimum_volatility', 5)
coinbase_app.minimum_volume = exchange_config.get('minimum_volume', 20000)
coinbase_app.volume_threshold = exchange_config.get('volume_threshold', 20000)
coinbase_app.minimum_quote_price = exchange_config.get('minimum_quote_price', 0.0000001)
coinbase_app.selection_score = exchange_config.get('selection_score', 10)
coinbase_app.tv_screener_ratings = [rating.upper() for rating in exchange_config.get('tv_screener_ratings', ['STRONG_BUY'])]
exchanges_loaded.append(coinbase_app)
elif ex == CryptoExchange.KUCOIN:
kucoin_app = PyCryptoBot(exchange=ex)
kucoin_app.public_api = KPublicAPI()
kucoin_app.scanner_quote_currencies = exchange_config.get('quote_currency', ['USDT'])
kucoin_app.granularity = Granularity(Granularity.convert_to_enum(exchange_config.get('granularity', '1h')))
kucoin_app.adx_threshold = exchange_config.get('adx_threshold', 25)
kucoin_app.volatility_threshold = exchange_config.get('volatility_threshold', 9)
kucoin_app.minimum_volatility = exchange_config.get('minimum_volatility', 5)
kucoin_app.minimum_volume = exchange_config.get('minimum_volume', 20000)
kucoin_app.volume_threshold = exchange_config.get('volume_threshold', 20000)
kucoin_app.minimum_quote_price = exchange_config.get('minimum_quote_price', 0.0000001)
kucoin_app.selection_score = exchange_config.get('selection_score', 10)
kucoin_app.tv_screener_ratings = [rating.upper() for rating in exchange_config.get('tv_screener_ratings', ['STRONG_BUY'])]
exchanges_loaded.append(kucoin_app)
else:
raise ValueError(f"Invalid exchange found in config: {ex}")
except AttributeError as e:
print(f"Invalid exchange: {e}...ignoring.")
return exchanges_loaded
def chunker(market_list, chunk_size):
markets = iter(market_list)
market_chunk = list(islice(markets, chunk_size))
while market_chunk:
yield market_chunk
market_chunk = list(islice(markets, chunk_size))
def get_markets(app, quote_currency):
markets = []
quote_currency = quote_currency.upper()
api = app.public_api
resp = api.getMarkets24HrStats()
if app.exchange == CryptoExchange.BINANCE:
for row in resp:
if row["symbol"].endswith(quote_currency):
markets.append(row['symbol'])
elif app.exchange == CryptoExchange.COINBASEPRO:
for market in resp:
market = str(market)
if market.endswith(f"-{quote_currency}"):
markets.append(market)
elif app.exchange == CryptoExchange.KUCOIN:
results = resp["data"]["ticker"]
for result in results:
if result["symbol"].endswith(f"-{quote_currency}"):
markets.append(result['symbol'])
return markets
def process_screener_data(app, markets, quote_currency, exchange_name):
"""
Hit TradingView up for the goods so we don't waste unnecessary time/compute resources (brandon's top picks)
"""
# Do you want it to spit out all the debug stuff?
debug = False
    ta_screener_list = [f"{re.sub('PRO', '', app.exchange.name, flags=re.IGNORECASE)}:{re.sub('-', '', market)}" for market in markets]
screener_staging = [p for p in chunker(ta_screener_list, 100)]
screener_analysis = []
additional_indicators = ["ATR", "KltChnl.upper", "KltChnl.lower"]
#TradingView.indicators.append("Volatility.D")
for pair_list in screener_staging:
screener_analysis.extend([a for a in get_multiple_analysis(screener='crypto', interval=app.granularity.short, symbols=pair_list, additional_indicators=additional_indicators).values()])
# Take what we need and do magic, ditch the rest.
formatted_ta = []
for ta in screener_analysis:
try:
if debug : print(f"Checking {ta.symbol} on {exchange_name}\n")
recommend = Decimal(ta.indicators.get('Recommend.All'))
volatility = Decimal(volatility_calculator(ta.indicators['BB.upper'], ta.indicators['BB.lower'], ta.indicators['KltChnl.upper'], ta.indicators['KltChnl.lower'], ta.indicators['high'], ta.indicators['low']))
#volatility = Decimal(ta.indicators['Volatility.D']) * 100
adx = abs(Decimal(ta.indicators['ADX']))
adx_posi_di = Decimal(ta.indicators['ADX+DI'])
adx_neg_di = Decimal(ta.indicators['ADX-DI'])
high = Decimal(ta.indicators['high']).quantize(Decimal('1e-{}'.format(8)))
low = Decimal(ta.indicators['low']).quantize(Decimal('1e-{}'.format(8)))
close = Decimal(ta.indicators['close']).quantize(Decimal('1e-{}'.format(8)))
# ATR normalised
atr = (Decimal(ta.indicators['ATR']) / close * 100).quantize(Decimal('1e-{}'.format(2))) if "ATR" in ta.indicators else 0
volume = Decimal(ta.indicators['volume'])
macd = Decimal(ta.indicators['MACD.macd'])
macd_signal = Decimal(ta.indicators['MACD.signal'])
bollinger_upper = Decimal(ta.indicators['BB.upper'])
bollinger_lower = Decimal(ta.indicators['BB.lower'])
kelt_upper = Decimal(ta.indicators['KltChnl.upper'])
kelt_lower = Decimal(ta.indicators['KltChnl.lower'])
rsi = Decimal(ta.indicators.get('RSI', 0))
stoch_d = Decimal(ta.indicators.get('Stoch.D', 0))
stoch_k = Decimal(ta.indicators.get('Stoch.K', 0))
williams_r = Decimal(ta.indicators.get('W.R', 0))
score = 0
analysis_summary = ta.summary
rating = ta.summary["RECOMMENDATION"]
#print(close)
if rating == "SELL":
score -= 2.5
elif rating == "STRONG_SELL":
score -= 5
elif rating == "NEUTRAL":
score += 0
elif rating == "BUY":
score += 2.5
elif rating == "STRONG_BUY":
score += 5
if (adx >= app.adx_threshold) and (adx_posi_di > adx_neg_di) and (adx_posi_di > adx):
if debug : print(f"ADX({adx}) >= {app.adx_threshold}")
score += 1
if volume >= app.volume_threshold:
if debug : print(f"Volume({volume}) >= {app.volume_threshold}")
score += 1
if abs(macd) > abs(macd_signal):
if debug : print(f"MACD({macd}) above signal({macd_signal})")
score += 1
if volatility >= app.volatility_threshold:
if debug : print(f"Volatility({volatility} is above {app.volatility_threshold}")
score += 1
if volatility < app.minimum_volatility:
if debug : print(f"{ta.symbol} ({volatility}) is below min volatility of {app.minimum_volatility}")
score -= 100
if volume < app.minimum_volume:
if debug : print(f"{ta.symbol} ({volume}) is below min volume of {app.volume}")
score -= 100
if close < app.minimum_quote_price:
if debug : print(f"{ta.symbol} ({close}) is below min quote price of {app.minimum_quote_price}")
score -= 100
if 30 >= rsi > 20:
score += 1
if 20 < stoch_d <= 30:
score += 1
if stoch_k > stoch_d:
score += 1
if williams_r <= -30:
score += 1
#print('symbol\tscore\tvolume\tvvolatilith\tadx\tadx_posi_di\tadx_neg_di\tmacd\tmacd_signal\tbollinger_upper\tbollinger_lower\trecommend')
#print(ta.symbol, score, volume, volatility, adx, adx_posi_di, adx_neg_di, macd, macd_signal, bollinger_upper, bollinger_lower, recommend, "\n")
#print(f"Symbol: {ta.symbol} Score: {score}/{app.selection_score} Rating: {rating}")
if (score >= app.selection_score) and (rating in app.tv_screener_ratings):
relavent_ta = {}
if app.exchange == CryptoExchange.COINBASEPRO or app.exchange == CryptoExchange.KUCOIN:
relavent_ta['market'] = re.sub(rf'(.*){quote_currency}', rf'\1-{quote_currency}', ta.symbol)
#relavent_ta['market'] = re.sub(quote_currency,f"-{quote_currency}", ta.symbol)
else:
relavent_ta['market'] = ta.symbol
#relavent_ta['market'] = ta.symbol
relavent_ta['recommend'] = recommend
relavent_ta['volume'] = volume
relavent_ta['volatility'] = volatility
relavent_ta['adx'] = adx
relavent_ta['adx+di'] = adx_posi_di
relavent_ta['adx-di'] = adx_neg_di
relavent_ta['macd'] = macd
relavent_ta['macd.signal'] = macd_signal
relavent_ta['bollinger_upper'] = bollinger_upper
relavent_ta['bollinger_lower'] = bollinger_lower
relavent_ta['rsi'] = rsi
relavent_ta['stoch_d'] = stoch_d
relavent_ta['stoch_k'] = stoch_k
relavent_ta['williamsR'] = williams_r
relavent_ta['rating'] = rating
relavent_ta['score'] = score
## Hack a percentage from the recommendation which would take into account all the indicators rather than just ATR
if atr > 0:
relavent_ta['atr72_pcnt'] = atr
#else:
# if recommend > 0:
# relavent_ta['atr72_pcnt'] = recommend * 100
else:
relavent_ta['atr72_pcnt'] = 0
try:
relavent_ta['buy_next'] = 'SEND IT!' if re.search('BUY', rating) else False
except AttributeError:
relavent_ta['buy_next'] = False
formatted_ta.append(relavent_ta)
except Exception as e:
pass
if formatted_ta:
# Stick it in a DF for the bots
df_markets = pd.DataFrame(formatted_ta)
df_markets = df_markets[["market", "score", "recommend", "volume", "volatility", "adx", "adx+di", "adx-di", "macd", "macd.signal", "bollinger_upper", "bollinger_lower", "rsi", "stoch_d", "stoch_k", "williamsR", "rating", "buy_next", "atr72_pcnt"]]
df_markets.columns = ["market", "score", "recommend", "volume", "volatility", "adx", "adx+di", "adx-di", "macd", "macd.signal", "bollinger_upper", "bollinger_lower", "rsi", "stoch_d", "stoch_k", "williamsR", "rating", "buy_next", "atr72_pcnt"]
df_markets["score"] = df_markets["score"].astype(float).round(0).astype(int)
df_markets["recommend"] = df_markets["recommend"].astype(float)
df_markets["volume"] = df_markets["volume"].astype(float).round(0).astype(int)
df_markets["volatility"] = df_markets["volatility"].astype(float)
df_markets["adx"] = df_markets["adx"].astype(float)
df_markets["adx+di"] = df_markets["adx+di"].astype(float)
df_markets["adx-di"] = df_markets["adx-di"].astype(float)
df_markets["macd"] = df_markets["macd"].astype(float)
df_markets["macd.signal"] = df_markets["macd.signal"].astype(float)
df_markets["bollinger_upper"] = df_markets["bollinger_upper"].astype(float)
df_markets["bollinger_lower"] = df_markets["bollinger_lower"].astype(float)
df_markets['rsi'] = df_markets['rsi'].astype(float)
df_markets['stoch_d'] = df_markets['stoch_d'].astype(float)
df_markets['stoch_k'] = df_markets['stoch_k'].astype(float)
df_markets['williamsR'] = df_markets['williamsR'].astype(float)
df_markets['atr72_pcnt'] = df_markets['atr72_pcnt'].astype(float)
df_markets.sort_values(by=["market"], ascending=True, inplace=True)
df_markets.set_index("market", inplace=True)
print(
df_markets.sort_values(
by=["buy_next", "atr72_pcnt"], ascending=[False, False], inplace=False
)
)
TGBot(app, scanner=True).save_scanner_output(app.exchange.value, quote_currency, df_markets)
else:
blank_data = {}
blank_data["buy_next"] = False
blank_data["atr72_pcnt"] = 0
blank_data["volume"] = 0
formatted_ta.append(blank_data)
df_markets = pd.DataFrame(formatted_ta)
TGBot(app, scanner=True).save_scanner_output(app.exchange.value, quote_currency, df_markets)
print('No pairs found!')
return True
if __name__ == '__main__':
import time
from datetime import datetime
tvlib_ver = version('tradingview-ta')
    if tuple(int(p) for p in tvlib_ver.split(".")[:3]) >= (3, 2, 10):  # numeric compare avoids "3.2.9" > "3.2.10"
print(f"Library is correct version - were good to go! (v {tvlib_ver})")
else:
print(f"Gotta update your tradingview-ta library please! (v {tvlib_ver})")
sys.exit()
start_time = time.time()
print('Processing, please wait...')
bootstrap_exchanges = load_configs()
for app in bootstrap_exchanges:
print(f"\n\n{app.exchange.name}")
for quote_currency in app.scanner_quote_currencies:
markets = get_markets(app, quote_currency)
try:
process_screener_data(app, markets, quote_currency, app.exchange.name)
except Exception as e:
print(e)
print("Scan run finished!")
print(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"Total elapsed time: {time.time() - start_time} sec")
|
py | b4061e9e5a8f00eb9d4c55b5076186b2012ec8fd | import numpy as np
from ..gap_tools import check_gaps
def bbox_mask(t_arr, x_arr, limits):
"""
Just a wrapper for np.where
"""
#NOTE: t_arr is included but no longer used
mask = np.where(
(x_arr >= limits[0]) & \
(x_arr <= limits[1]))[0]
return mask
def get_mask(loc_t_arr, loc_x_arr, loc, t_thresh, d_thresh, verbose=False):
"""
"""
# Get the corresponding indices and ultimately adjust the bin
mask = bbox_mask(loc_t_arr, loc_x_arr, [loc-d_thresh,loc+d_thresh])
# check if empty
if mask.size <= 0:
if verbose: print('\t\tmask empty (1)')
#print(loc_t_arr)
#print(loc_x_arr, loc-d_thresh, loc, loc+d_thresh)
return np.empty([])
if verbose: print("\t\tmask 1:", mask[0], mask[-1])
#tbounds = [loc_t_arr[mask].min(), loc_t_arr[mask].max()]
#print(tbounds)
#mask = np.where((loc_t_arr >= loc_t_arr[mask].min()) & \
# (loc_t_arr <= loc_t_arr[mask].max()))[0]
    # Check if loc_t_arr contains gaps > t_thresh; if so, split and repeat
gap_mask = check_gaps(loc_t_arr[mask], t_thresh, verbose)
# Get those mask indices corresponding to the longest (sub)cluster
mask = mask[gap_mask]
# check if empty
if mask.size <= 0:
if verbose: print('\t\tmask empty (2)')
return np.empty([])
if verbose:
print("\t\tmask 2:",mask[0],mask[-1])
return mask
def update_global_mask(g_mask, arr, lims):
"""
Update a Boolean array with keeps track of classified events.
"""
#Note: this is easier than tracking & storing the indices, for now.
# Get the indices of the events within the cluster limits
mask1 = np.where((arr >= lims.min()) & (arr <= lims.max()))
# Store those indices as False (removing the events from processing)
g_mask[mask1] = False
return g_mask
def get_mask_ends(loc_t_arr, loc_x_arr, loc, t_thresh, d_thresh, verbose=False):
"""
"""
mask = np.where((loc_x_arr <= loc+1.0*d_thresh) & (loc_x_arr >= loc-1.0*d_thresh))[0]
diffs = np.diff(loc_t_arr[mask])
dmask = np.where(diffs>t_thresh)[0].tolist()
ddmask = [-1] + dmask + [mask.size-1]
return [[mask[p[0]+1],mask[p[1]]] for p in list(zip(ddmask[:-1], ddmask[1:]))]
|
py | b4061f8fb4c2f84d69ea91e8339ef145a48fb59a | from typing import Protocol
class Command(Protocol):
def Details(self) -> str:
...
def Execute(self) -> None:
...
def Undo(self) -> None:
...
def Redo(self) -> None:
...
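# Minimal sketch of a concrete command satisfying this protocol. The
# AppendCommand name and its list-based receiver are assumptions made for
# illustration only; they are not part of the original module.
class AppendCommand:
    def __init__(self, items: list, value: str) -> None:
        self._items = items
        self._value = value
    def Details(self) -> str:
        return f"Append {self._value!r}"
    def Execute(self) -> None:
        self._items.append(self._value)
    def Undo(self) -> None:
        self._items.remove(self._value)
    def Redo(self) -> None:
        self.Execute()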
|
py | b4061fa0dd6aefad3eac2d6755d8b53c3c1b6fe4 | from cffi import FFI
ffibuilder = FFI()
# cdef() expects a single string declaring the C types, functions and
# globals needed to use the shared object. It must be in valid C syntax.
with open('ddcutil_gen.h', 'r') as header_file:
ffibuilder.cdef(header_file.read())
# set_source() gives the name of the python extension module to
# produce, and some C source code as a string. This C code needs
# to make the declared functions, types and globals available,
# so it is often just the "#include".
ffibuilder.set_source("_ddc_cffi",
"""
#include "ddcutil_c_api.h"
#include "ddcutil_types.h"
""",
libraries=['ddcutil']) # library name, for the linker
if __name__ == "__main__":
ffibuilder.compile(verbose=True)
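# Usage sketch (an assumption, not part of the original script): once this build
# step has run, client code can import the generated extension with
#
#   from _ddc_cffi import ffi, lib
#
# and call the functions declared in ddcutil_gen.h via `lib`, using `ffi` for
# any pointer or struct handling.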
|
py | b40620324d946482e6e00a237ab0ab4dd5c1ba5f | from .eliminare import Eliminare
from .riempire_na import RiempireNAItemWeight
from .riempire_na import RiempireNAMedia
from .riempire_na import RiempireNAOutletSize
from .ottenere_dummy import OttenereDummy
from .standardizzare import Standardizzare
from .sbiancare import Sbiancare
from .sostituire import Sostituire
from .tweet_analyzer import tweet_analyzer
|
py | b40625b66384aac6d8e4036c750eec17408842af | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
size
Sets the marker size of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattercarpet.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattercarpet.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scattercarpet.unselected.M
arker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattercarpet.unselected.T
extfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Unselected`
marker
:class:`plotly.graph_objects.scattercarpet.unselected.M
arker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattercarpet.unselected.T
extfont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super(Unselected, self).__init__("unselected")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Unselected`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import unselected as v_unselected
# Initialize validators
# ---------------------
self._validators["marker"] = v_unselected.MarkerValidator()
self._validators["textfont"] = v_unselected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
self["marker"] = marker if marker is not None else _v
_v = arg.pop("textfont", None)
self["textfont"] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Textfont`
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import textfont as v_textfont
# Initialize validators
# ---------------------
self._validators["color"] = v_textfont.ColorValidator()
self._validators["colorsrc"] = v_textfont.ColorsrcValidator()
self._validators["family"] = v_textfont.FamilyValidator()
self._validators["familysrc"] = v_textfont.FamilysrcValidator()
self._validators["size"] = v_textfont.SizeValidator()
self._validators["sizesrc"] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
plotly.graph_objs.scattercarpet.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.selected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of selected points.
Returns
-------
plotly.graph_objs.scattercarpet.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scattercarpet.selected.Mar
ker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattercarpet.selected.Tex
tfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Selected`
marker
:class:`plotly.graph_objects.scattercarpet.selected.Mar
ker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattercarpet.selected.Tex
tfont` instance or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__("selected")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Selected`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import selected as v_selected
# Initialize validators
# ---------------------
self._validators["marker"] = v_selected.MarkerValidator()
self._validators["textfont"] = v_selected.TextfontValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
self["marker"] = marker if marker is not None else _v
_v = arg.pop("textfont", None)
self["textfont"] = textfont if textfont is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
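# Illustrative usage sketch (editor's note, not part of the generated module):
# assuming plotly >= 4 is installed, a Selected instance can be built from keyword
# arguments or from a plain dict of compatible properties, e.g.
#
#     import plotly.graph_objects as go
#     sel = go.scattercarpet.Selected(
#         marker=dict(color="crimson", size=9, opacity=0.8),
#         textfont=dict(color="crimson"),
#     )
#     # equivalently: go.scattercarpet.Selected({"marker": {"size": 9}})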
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax`. Has an effect
only if `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
`marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
`marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scattercarpet.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter
carpet.marker.colorbar.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scattercarpet.marker.colorbar.tickformatstopd
efaults), sets the default property values to
use for elements of
scattercarpet.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scattercarpet.mark
er.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
scattercarpet.marker.colorbar.title.font
instead. Sets this color bar's title font. Note
that the title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattercarpet.marker.colorbar.title.side
instead. Determines the location of color bar's
title with respect to the color bar. Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scattercarpet.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# gradient
# --------
@property
def gradient(self):
"""
The 'gradient' property is an instance of Gradient
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.marker.Gradient`
- A dict of string/value properties that will be passed
to the Gradient constructor
Supported dict properties:
color
Sets the final color of the gradient fill: the
center color for radial, the right for
horizontal, or the bottom for vertical.
colorsrc
Sets the source reference on plot.ly for color
.
type
Sets the type of gradient used to fill the
markers
typesrc
Sets the source reference on plot.ly for type
.
Returns
-------
plotly.graph_objs.scattercarpet.marker.Gradient
"""
return self["gradient"]
@gradient.setter
def gradient(self, val):
self["gradient"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly.graph_objs.scattercarpet.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# maxdisplayed
# ------------
@property
def maxdisplayed(self):
"""
Sets a maximum number of points to be drawn on the graph. 0
corresponds to no limit.
The 'maxdisplayed' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["maxdisplayed"]
@maxdisplayed.setter
def maxdisplayed(self, val):
self["maxdisplayed"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on plot.ly for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, 'circle', 100, 'circle-open', 200, 'circle-dot', 300,
'circle-open-dot', 1, 'square', 101, 'square-open', 201,
'square-dot', 301, 'square-open-dot', 2, 'diamond', 102,
'diamond-open', 202, 'diamond-dot', 302,
'diamond-open-dot', 3, 'cross', 103, 'cross-open', 203,
'cross-dot', 303, 'cross-open-dot', 4, 'x', 104, 'x-open',
204, 'x-dot', 304, 'x-open-dot', 5, 'triangle-up', 105,
'triangle-up-open', 205, 'triangle-up-dot', 305,
'triangle-up-open-dot', 6, 'triangle-down', 106,
'triangle-down-open', 206, 'triangle-down-dot', 306,
'triangle-down-open-dot', 7, 'triangle-left', 107,
'triangle-left-open', 207, 'triangle-left-dot', 307,
'triangle-left-open-dot', 8, 'triangle-right', 108,
'triangle-right-open', 208, 'triangle-right-dot', 308,
'triangle-right-open-dot', 9, 'triangle-ne', 109,
'triangle-ne-open', 209, 'triangle-ne-dot', 309,
'triangle-ne-open-dot', 10, 'triangle-se', 110,
'triangle-se-open', 210, 'triangle-se-dot', 310,
'triangle-se-open-dot', 11, 'triangle-sw', 111,
'triangle-sw-open', 211, 'triangle-sw-dot', 311,
'triangle-sw-open-dot', 12, 'triangle-nw', 112,
'triangle-nw-open', 212, 'triangle-nw-dot', 312,
'triangle-nw-open-dot', 13, 'pentagon', 113,
'pentagon-open', 213, 'pentagon-dot', 313,
'pentagon-open-dot', 14, 'hexagon', 114, 'hexagon-open',
214, 'hexagon-dot', 314, 'hexagon-open-dot', 15,
'hexagon2', 115, 'hexagon2-open', 215, 'hexagon2-dot',
315, 'hexagon2-open-dot', 16, 'octagon', 116,
'octagon-open', 216, 'octagon-dot', 316,
'octagon-open-dot', 17, 'star', 117, 'star-open', 217,
'star-dot', 317, 'star-open-dot', 18, 'hexagram', 118,
'hexagram-open', 218, 'hexagram-dot', 318,
'hexagram-open-dot', 19, 'star-triangle-up', 119,
'star-triangle-up-open', 219, 'star-triangle-up-dot', 319,
'star-triangle-up-open-dot', 20, 'star-triangle-down',
120, 'star-triangle-down-open', 220,
'star-triangle-down-dot', 320,
'star-triangle-down-open-dot', 21, 'star-square', 121,
'star-square-open', 221, 'star-square-dot', 321,
'star-square-open-dot', 22, 'star-diamond', 122,
'star-diamond-open', 222, 'star-diamond-dot', 322,
'star-diamond-open-dot', 23, 'diamond-tall', 123,
'diamond-tall-open', 223, 'diamond-tall-dot', 323,
'diamond-tall-open-dot', 24, 'diamond-wide', 124,
'diamond-wide-open', 224, 'diamond-wide-dot', 324,
'diamond-wide-open-dot', 25, 'hourglass', 125,
'hourglass-open', 26, 'bowtie', 126, 'bowtie-open', 27,
'circle-cross', 127, 'circle-cross-open', 28, 'circle-x',
128, 'circle-x-open', 29, 'square-cross', 129,
'square-cross-open', 30, 'square-x', 130, 'square-x-open',
31, 'diamond-cross', 131, 'diamond-cross-open', 32,
'diamond-x', 132, 'diamond-x-open', 33, 'cross-thin', 133,
'cross-thin-open', 34, 'x-thin', 134, 'x-thin-open', 35,
'asterisk', 135, 'asterisk-open', 36, 'hash', 136,
'hash-open', 236, 'hash-dot', 336, 'hash-open-dot', 37,
'y-up', 137, 'y-up-open', 38, 'y-down', 138,
'y-down-open', 39, 'y-left', 139, 'y-left-open', 40,
'y-right', 140, 'y-right-open', 41, 'line-ew', 141,
'line-ew-open', 42, 'line-ns', 142, 'line-ns-open', 43,
'line-ne', 143, 'line-ne-open', 44, 'line-nw', 144,
'line-nw-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on plot.ly for symbol .
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattercarpet.marker.Color
Bar` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
:class:`plotly.graph_objects.scattercarpet.marker.Gradi
ent` instance or dict with compatible properties
line
:class:`plotly.graph_objects.scattercarpet.marker.Line`
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on the
graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
maxdisplayed=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattercarpet.marker.Color
Bar` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
:class:`plotly.graph_objects.scattercarpet.marker.Gradi
ent` instance or dict with compatible properties
line
:class:`plotly.graph_objects.scattercarpet.marker.Line`
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on the
graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import marker as v_marker
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_marker.AutocolorscaleValidator()
self._validators["cauto"] = v_marker.CautoValidator()
self._validators["cmax"] = v_marker.CmaxValidator()
self._validators["cmid"] = v_marker.CmidValidator()
self._validators["cmin"] = v_marker.CminValidator()
self._validators["color"] = v_marker.ColorValidator()
self._validators["coloraxis"] = v_marker.ColoraxisValidator()
self._validators["colorbar"] = v_marker.ColorBarValidator()
self._validators["colorscale"] = v_marker.ColorscaleValidator()
self._validators["colorsrc"] = v_marker.ColorsrcValidator()
self._validators["gradient"] = v_marker.GradientValidator()
self._validators["line"] = v_marker.LineValidator()
self._validators["maxdisplayed"] = v_marker.MaxdisplayedValidator()
self._validators["opacity"] = v_marker.OpacityValidator()
self._validators["opacitysrc"] = v_marker.OpacitysrcValidator()
self._validators["reversescale"] = v_marker.ReversescaleValidator()
self._validators["showscale"] = v_marker.ShowscaleValidator()
self._validators["size"] = v_marker.SizeValidator()
self._validators["sizemin"] = v_marker.SizeminValidator()
self._validators["sizemode"] = v_marker.SizemodeValidator()
self._validators["sizeref"] = v_marker.SizerefValidator()
self._validators["sizesrc"] = v_marker.SizesrcValidator()
self._validators["symbol"] = v_marker.SymbolValidator()
self._validators["symbolsrc"] = v_marker.SymbolsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorbar", None)
self["colorbar"] = colorbar if colorbar is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("gradient", None)
self["gradient"] = gradient if gradient is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("maxdisplayed", None)
self["maxdisplayed"] = maxdisplayed if maxdisplayed is not None else _v
_v = arg.pop("opacity", None)
self["opacity"] = opacity if opacity is not None else _v
_v = arg.pop("opacitysrc", None)
self["opacitysrc"] = opacitysrc if opacitysrc is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("showscale", None)
self["showscale"] = showscale if showscale is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizemin", None)
self["sizemin"] = sizemin if sizemin is not None else _v
_v = arg.pop("sizemode", None)
self["sizemode"] = sizemode if sizemode is not None else _v
_v = arg.pop("sizeref", None)
self["sizeref"] = sizeref if sizeref is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
_v = arg.pop("symbol", None)
self["symbol"] = symbol if symbol is not None else _v
_v = arg.pop("symbolsrc", None)
self["symbolsrc"] = symbolsrc if symbolsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
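# Illustrative usage sketch (editor's note, not part of the generated module):
# assuming plotly >= 4, the Marker constructor mirrors the properties documented
# above; passing a numerical `color` array together with a named `colorscale`
# enables the continuous-color options (cmin/cmax, colorbar, etc.), e.g.
#
#     import plotly.graph_objects as go
#     marker = go.scattercarpet.Marker(
#         color=[0.1, 0.5, 0.9],   # numerical array, mapped through the colorscale
#         colorscale="Viridis",
#         cmin=0.0,
#         cmax=1.0,
#         showscale=True,
#         size=8,
#     )
#     trace = go.Scattercarpet(a=[1, 2, 3], b=[3, 2, 1], marker=marker)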
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
# shape
# -----
@property
def shape(self):
"""
Determines the line shape. With "spline" the lines are drawn
using spline interpolation. The other available values
correspond to step-wise line shapes.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'spline']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
# smoothing
# ---------
@property
def smoothing(self):
"""
Has an effect only if `shape` is set to "spline". Sets the
amount of smoothing. 0 corresponds to no smoothing (equivalent
to a "linear" shape).
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"]
@smoothing.setter
def smoothing(self, val):
self["smoothing"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline". Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
"""
def __init__(
self,
arg=None,
color=None,
dash=None,
shape=None,
smoothing=None,
width=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Line`
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline". Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import line as v_line
# Initialize validators
# ---------------------
self._validators["color"] = v_line.ColorValidator()
self._validators["dash"] = v_line.DashValidator()
self._validators["shape"] = v_line.ShapeValidator()
self._validators["smoothing"] = v_line.SmoothingValidator()
self._validators["width"] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("dash", None)
self["dash"] = dash if dash is not None else _v
_v = arg.pop("shape", None)
self["shape"] = shape if shape is not None else _v
_v = arg.pop("smoothing", None)
self["smoothing"] = smoothing if smoothing is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
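# Illustrative usage sketch (editor's note, not part of the generated module):
# assuming plotly >= 4, Line accepts the color/dash/shape/smoothing/width
# properties documented above, e.g.
#
#     import plotly.graph_objects as go
#     line = go.scattercarpet.Line(color="royalblue", dash="dot", shape="spline",
#                                  smoothing=1.0, width=2)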
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.scattercarpet.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scattercarpet"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scattercarpet import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
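# Illustrative usage (not part of the generated class above; the property
# values shown are examples only): a Hoverlabel can be built from keyword
# properties or from a compatible dict, e.g.
#
#     Hoverlabel(bgcolor="white", bordercolor="#444", namelength=20)
#     Hoverlabel({"font": {"family": "Arial", "size": 12}})
#
# Both forms pass through the validators initialized in __init__ above.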
__all__ = [
"Hoverlabel",
"Line",
"Marker",
"Selected",
"Stream",
"Textfont",
"Unselected",
"hoverlabel",
"marker",
"selected",
"unselected",
]
from plotly.graph_objs.scattercarpet import unselected
from plotly.graph_objs.scattercarpet import selected
from plotly.graph_objs.scattercarpet import marker
from plotly.graph_objs.scattercarpet import hoverlabel
|
py | b406268e87fc5ecd7b7bdaa10dade1e82918cce9 | import os
import re
import setuptools
HERE = os.path.abspath(os.path.dirname(__file__))
VERSION_PY = ["version.py"]
REQUIRED_PCKGS = [
'bokeh',
]
def read(*args):
"""Read complete file contest."""
fp = os.path.join(HERE, *args)
with open(fp) as fh:
return fh.read()
def get_requirements():
"""Read the requirements file."""
requirements = read("requirements.txt")
return [r for r in requirements.strip().splitlines()]
def get_version():
"""Parse version from file."""
version_file = read(*VERSION_PY)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string")
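# Illustrative example (assumed, not shipped here) of the version.py file the
# regex above expects to find next to this setup script:
#
#     __version__ = "0.1.0"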
setuptools.setup(
install_requires=get_requirements(),
version=get_version(),
)
|
py | b40629241909ae188296ebc5ec566835b1ca45bb | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import random, sys, time
from bisect import insort, bisect_left
from functools import wraps
from whoosh.compat import xrange
# These must be valid separate characters in CASE-INSENSITIVE filenames
IDCHARS = "0123456789abcdefghijklmnopqrstuvwxyz"
if hasattr(time, "perf_counter"):
now = time.perf_counter
elif sys.platform == 'win32':
now = time.clock
else:
now = time.time
def random_name(size=28):
return "".join(random.choice(IDCHARS) for _ in xrange(size))
def make_binary_tree(fn, args, **kwargs):
"""Takes a function/class that takes two positional arguments and a list of
arguments and returns a binary tree of results/instances.
>>> make_binary_tree(UnionMatcher, [matcher1, matcher2, matcher3])
UnionMatcher(matcher1, UnionMatcher(matcher2, matcher3))
Any keyword arguments given to this function are passed to the class
initializer.
"""
count = len(args)
if not count:
raise ValueError("Called make_binary_tree with empty list")
elif count == 1:
return args[0]
half = count // 2
return fn(make_binary_tree(fn, args[:half], **kwargs),
make_binary_tree(fn, args[half:], **kwargs), **kwargs)
def make_weighted_tree(fn, ls, **kwargs):
"""Takes a function/class that takes two positional arguments and a list of
    (weight, argument) tuples and returns a Huffman-like weighted tree of
results/instances.
"""
if not ls:
raise ValueError("Called make_weighted_tree with empty list")
ls.sort()
while len(ls) > 1:
a = ls.pop(0)
b = ls.pop(0)
insort(ls, (a[0] + b[0], fn(a[1], b[1])))
return ls[0][1]
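# Illustrative usage (hypothetical matchers; repr shown in the same style as
# the make_binary_tree doctest above):
#
#     >>> make_weighted_tree(UnionMatcher, [(4.0, m1), (1.0, m2), (2.0, m3)])
#     UnionMatcher(UnionMatcher(m2, m3), m1)
#
# Lighter entries are combined first, so heavier matchers end up closer to the
# root of the resulting tree.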
# Fibonacci function
_fib_cache = {}
def fib(n):
"""Returns the nth value in the Fibonacci sequence.
"""
if n <= 2:
return n
if n in _fib_cache:
return _fib_cache[n]
result = fib(n - 1) + fib(n - 2)
_fib_cache[n] = result
return result
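# Note: with the base case above the sequence starts fib(1) == 1, fib(2) == 2,
# so for example fib(5) == 8 and fib(10) == 89.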
# Decorators
def synchronized(func):
"""Decorator for storage-access methods, which synchronizes on a threading
lock. The parent object must have 'is_closed' and '_sync_lock' attributes.
"""
@wraps(func)
def synchronized_wrapper(self, *args, **kwargs):
with self._sync_lock:
return func(self, *args, **kwargs)
return synchronized_wrapper
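# Minimal usage sketch (hypothetical class, not part of this module): the
# decorated method only requires the parent object to carry a `_sync_lock`
# attribute, e.g.
#
#     import threading
#
#     class ExampleStorage(object):
#         def __init__(self):
#             self.is_closed = False
#             self._sync_lock = threading.RLock()
#
#         @synchronized
#         def write(self, data):
#             pass  # body runs with self._sync_lock held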
|
gyp | b406299d5b754a6c11e95054ed0559a4bfc07509 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
# TODO(dmaclach): can we pick this up some other way? Right now it's
# duplicated from chrome.gyp
'chromium_code': 1,
# Use consistent strings across all platforms. Note that the plugin name
# is brand-dependent and is defined further down.
# Must match host/plugin/constants.h
'host_plugin_mime_type': 'application/vnd.chromium.remoting-host',
'host_plugin_description': 'Allow another user to access your computer securely over the Internet.',
# The version is composed from major & minor versions specific to remoting
# and build & patch versions inherited from Chrome.
'version_py_path': '../chrome/tools/build/version.py',
'version_path': '../remoting/VERSION',
'chrome_version_path': '../chrome/VERSION',
'version_full':
'<!(python <(version_py_path) -f <(version_path) -t "@MAJOR@.@MINOR@").'
'<!(python <(version_py_path) -f <(chrome_version_path) -t "@BUILD@.@PATCH@")',
'version_short':
'<!(python <(version_py_path) -f <(version_path) -t "@MAJOR@.@MINOR@").'
'<!(python <(version_py_path) -f <(chrome_version_path) -t "@BUILD@")',
'conditions': [
['OS=="mac"', {
'conditions': [
['branding=="Chrome"', {
'mac_bundle_id': 'com.google.Chrome',
'mac_creator': 'rimZ',
}, { # else: branding!="Chrome"
'mac_bundle_id': 'org.chromium.Chromium',
'mac_creator': 'Cr24',
}], # branding
], # conditions
'host_plugin_extension': 'plugin',
'host_plugin_prefix': '',
}],
['os_posix == 1 and OS != "mac" and target_arch == "ia32"', {
# linux 32 bit
'host_plugin_extension': 'ia32.so',
'host_plugin_prefix': 'lib',
}],
['os_posix == 1 and OS != "mac" and target_arch == "x64"', {
# linux 64 bit
'host_plugin_extension': 'x64.so',
'host_plugin_prefix': 'lib',
}],
['os_posix == 1 and OS != "mac" and target_arch == "arm"', {
        # linux arm
'host_plugin_extension': 'arm.so',
'host_plugin_prefix': 'lib',
}],
['OS=="win"', {
'host_plugin_extension': 'dll',
'host_plugin_prefix': '',
}],
['branding=="Chrome"', {
# Must match host/plugin/constants.h
'host_plugin_name': 'Chrome Remote Desktop Host',
'remoting_webapp_locale_files': [
'webapp/_locales.official/ar/messages.json',
'webapp/_locales.official/bg/messages.json',
'webapp/_locales.official/ca/messages.json',
'webapp/_locales.official/cs/messages.json',
'webapp/_locales.official/da/messages.json',
'webapp/_locales.official/de/messages.json',
'webapp/_locales.official/el/messages.json',
'webapp/_locales.official/en/messages.json',
'webapp/_locales.official/en_GB/messages.json',
'webapp/_locales.official/es/messages.json',
'webapp/_locales.official/es_419/messages.json',
'webapp/_locales.official/et/messages.json',
'webapp/_locales.official/fi/messages.json',
'webapp/_locales.official/fil/messages.json',
'webapp/_locales.official/fr/messages.json',
'webapp/_locales.official/he/messages.json',
'webapp/_locales.official/hi/messages.json',
'webapp/_locales.official/hr/messages.json',
'webapp/_locales.official/hu/messages.json',
'webapp/_locales.official/id/messages.json',
'webapp/_locales.official/it/messages.json',
'webapp/_locales.official/ja/messages.json',
'webapp/_locales.official/ko/messages.json',
'webapp/_locales.official/lt/messages.json',
'webapp/_locales.official/lv/messages.json',
'webapp/_locales.official/nb/messages.json',
'webapp/_locales.official/nl/messages.json',
'webapp/_locales.official/pl/messages.json',
'webapp/_locales.official/pt_BR/messages.json',
'webapp/_locales.official/pt_PT/messages.json',
'webapp/_locales.official/ro/messages.json',
'webapp/_locales.official/ru/messages.json',
'webapp/_locales.official/sk/messages.json',
'webapp/_locales.official/sl/messages.json',
'webapp/_locales.official/sr/messages.json',
'webapp/_locales.official/sv/messages.json',
'webapp/_locales.official/th/messages.json',
'webapp/_locales.official/tr/messages.json',
'webapp/_locales.official/uk/messages.json',
'webapp/_locales.official/vi/messages.json',
'webapp/_locales.official/zh_CN/messages.json',
'webapp/_locales.official/zh_TW/messages.json',
],
}, { # else: branding!="Chrome"
# Must match host/plugin/constants.h
'host_plugin_name': 'Chromoting Host',
'remoting_webapp_locale_files': [
'webapp/_locales/en/messages.json',
],
}],
],
'remoting_webapp_files': [
'resources/icon_cross.png',
'resources/icon_host.png',
'resources/icon_pencil.png',
'resources/icon_warning.png',
'webapp/client_plugin.js',
'webapp/client_plugin_async.js',
'webapp/client_plugin_v1.js',
'webapp/client_screen.js',
'webapp/client_session.js',
'webapp/clipboard.js',
'webapp/connection_history.css',
'webapp/connection_history.js',
'webapp/connection_stats.css',
'webapp/connection_stats.js',
'webapp/cs_oauth2_trampoline.js',
'webapp/event_handlers.js',
'webapp/format_iq.js',
'webapp/host_controller.js',
'webapp/host_list.js',
'webapp/host_screen.js',
'webapp/host_session.js',
'webapp/host_setup_dialog.js',
'webapp/host_table_entry.js',
'webapp/l10n.js',
'webapp/log_to_server.js',
'webapp/main.css',
'webapp/main.html',
'webapp/manifest.json',
'webapp/menu_button.css',
'webapp/menu_button.js',
'webapp/oauth2.js',
'webapp/oauth2_callback.html',
'webapp/plugin_settings.js',
'webapp/remoting.js',
'webapp/scale-to-fit.png',
'webapp/server_log_entry.js',
'webapp/spinner.gif',
'webapp/stats_accumulator.js',
'webapp/toolbar.css',
'webapp/toolbar.js',
'webapp/ui_mode.js',
'webapp/wcs.js',
'webapp/wcs_loader.js',
'webapp/xhr.js',
'resources/chromoting16.png',
'resources/chromoting48.png',
'resources/chromoting128.png',
'resources/disclosure_arrow_down.png',
'resources/disclosure_arrow_right.png',
'resources/infographic_my_computers.png',
'resources/infographic_remote_assistance.png',
'resources/tick.png',
],
'remoting_host_installer_mac_roots': [
'host/installer/mac/',
'<(DEPTH)/chrome/installer/mac/',
],
'remoting_host_installer_mac_files': [
'host/installer/mac/do_signing.sh',
'host/installer/mac/ChromotingHost.packproj',
'host/installer/mac/ChromotingHostService.packproj',
'host/installer/mac/ChromotingHostUninstaller.packproj',
'host/installer/mac/LaunchAgents/org.chromium.chromoting.plist',
'host/installer/mac/PrivilegedHelperTools/org.chromium.chromoting.me2me.sh',
'host/installer/mac/Scripts/keystone_install.sh',
'host/installer/mac/Scripts/remoting_postflight.sh',
'host/installer/mac/Scripts/remoting_preflight.sh',
'host/installer/mac/Keystone/GoogleSoftwareUpdate.pkg.zip',
'<(DEPTH)/chrome/installer/mac/pkg-dmg',
],
},
'target_defaults': {
'defines': [
],
'include_dirs': [
'..', # Root of Chrome checkout
],
},
'conditions': [
['OS=="linux"', {
'targets': [
# Linux breakpad processing
{
'target_name': 'remoting_linux_symbols',
'type': 'none',
'conditions': [
['linux_dump_symbols==1', {
'actions': [
{
'action_name': 'dump_symbols',
'variables': {
'plugin_file': '<(host_plugin_prefix)remoting_host_plugin.<(host_plugin_extension)',
},
'inputs': [
'<(DEPTH)/build/linux/dump_app_syms',
'<(PRODUCT_DIR)/dump_syms',
'<(PRODUCT_DIR)/<(plugin_file)',
],
'outputs': [
'<(PRODUCT_DIR)/<(plugin_file).breakpad.<(target_arch)',
],
'action': ['<(DEPTH)/build/linux/dump_app_syms',
'<(PRODUCT_DIR)/dump_syms',
'<(linux_strip_binary)',
'<(PRODUCT_DIR)/<(plugin_file)',
'<@(_outputs)'],
'message': 'Dumping breakpad symbols to <(_outputs)',
'process_outputs_as_sources': 1,
},
],
'dependencies': [
'remoting_host_plugin',
'../breakpad/breakpad.gyp:dump_syms',
],
}], # 'linux_dump_symbols==1'
], # end of 'conditions'
}, # end of target 'linux_symbols'
], # end of 'targets'
}], # 'OS=="linux"'
['OS=="mac"', {
'targets': [
{
'target_name': 'remoting_host_uninstaller',
'type': 'executable',
'mac_bundle': 1,
'conditions': [
['branding == "Chrome"', {
'variables': {
'copyright_by': 'Google Inc.',
'bundle_id': 'com.google.chromeremotedesktop.host_uninstaller',
'bundle_name': 'Chrome Remote Desktop Host Uninstaller',
},
}, { # else branding!="Chrome"
'variables': {
'copyright_by': 'The Chromium Authors.',
'bundle_id': 'org.chromium.remoting.host_uninstaller',
'bundle_name': 'Chromoting Host Uninstaller',
},
}],
],
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
],
'sources': [
'host/installer/mac/uninstaller/remoting_uninstaller.h',
'host/installer/mac/uninstaller/remoting_uninstaller.mm',
],
'xcode_settings': {
'INFOPLIST_FILE': 'host/installer/mac/uninstaller/remoting_uninstaller-Info.plist',
'INFOPLIST_PREPROCESS': 'YES',
'INFOPLIST_PREPROCESSOR_DEFINITIONS': 'VERSION_FULL="<(version_full)" VERSION_SHORT="<(version_short)" BUNDLE_NAME="<(bundle_name)" BUNDLE_ID="<(bundle_id)" COPYRIGHT_BY="<(copyright_by)"',
},
'mac_bundle_resources': [
'host/installer/mac/uninstaller/remoting_uninstaller.icns',
'host/installer/mac/uninstaller/remoting_uninstaller.xib',
'host/installer/mac/uninstaller/remoting_uninstaller-Info.plist',
],
'mac_bundle_resources!': [
'host/installer/mac/uninstaller/remoting_uninstaller-Info.plist',
],
}, # end of target 'remoting_host_uninstaller'
# This packages up the files needed for the remoting host installer so
# they can be sent off to be signed.
# We don't build an installer here because we don't have signed binaries.
{
'target_name': 'remoting_me2me_host_archive',
'type': 'none',
'dependencies': [
'remoting_me2me_host',
'remoting_host_uninstaller',
],
'sources': [
'host/installer/build-installer-archive.py',
'<@(remoting_host_installer_mac_files)',
],
'conditions': [
['branding == "Chrome"', {
'variables': {
'copyright_by': 'Google Inc.',
'host_name': 'Chrome Remote Desktop Host',
'host_service_name': 'Chrome Remote Desktop Host Service',
'host_uninstaller_name': 'Chrome Remote Desktop Host Uninstaller',
'bundle_prefix': 'com.google.pkg',
},
}, { # else branding!="Chrome"
'variables': {
'copyright_by': 'The Chromium Authors.',
'host_name': 'Chromoting Host',
'host_service_name': 'Chromoting Host Service',
'host_uninstaller_name': 'Chromoting Host Uninstaller',
'bundle_prefix': 'org.chromium.pkg',
},
}],
], # conditions
'actions': [
{
'action_name': 'Zip installer files for signing',
'temp_dir': '<(SHARED_INTERMEDIATE_DIR)/remoting/remoting-me2me-host',
'zip_path': '<(PRODUCT_DIR)/remoting-me2me-host-<(OS).zip',
'generated_files': [
'<(PRODUCT_DIR)/remoting_me2me_host',
'<(PRODUCT_DIR)/remoting_host_uninstaller.app',
],
'generated_files_dst': [
'PrivilegedHelperTools/org.chromium.chromoting.me2me_host',
'Applications/<(host_uninstaller_name).app',
],
'source_files': [
'<@(remoting_host_installer_mac_files)',
],
'defs': [
'VERSION=<(version_full)',
'VERSION_SHORT=<(version_short)',
'VERSION_MAJOR=<!(python <(version_py_path) -f <(version_path) -t "@MAJOR@")',
'VERSION_MINOR=<!(python <(version_py_path) -f <(version_path) -t "@MINOR@")',
'COPYRIGHT_BY=<(copyright_by)',
'HOST_NAME=<(host_name)',
'HOST_SERVICE_NAME=<(host_service_name)',
'HOST_UNINSTALLER_NAME=<(host_uninstaller_name)',
'HOST_PKG=<(host_name)',
'HOST_SERVICE_PKG=<!(echo <(host_service_name) | sed "s/ //g")',
'HOST_UNINSTALLER_PKG=<!(echo <(host_uninstaller_name) | sed "s/ //g")',
'BUNDLE_ID_HOST=<(bundle_prefix).<(host_name)',
'BUNDLE_ID_HOST_SERVICE=<(bundle_prefix).<(host_service_name)',
'BUNDLE_ID_HOST_UNINSTALLER=<(bundle_prefix).<(host_uninstaller_name)',
'DMG_NAME=<(host_name)',
],
'inputs': [
'host/installer/build-installer-archive.py',
'<@(_source_files)',
],
'outputs': [
'<(_zip_path)',
],
'action': [
'python',
'host/installer/build-installer-archive.py',
'<(_temp_dir)',
'<(_zip_path)',
'--source-file-roots',
'<@(remoting_host_installer_mac_roots)',
'--source-files',
'<@(_source_files)',
'--generated-files',
'<@(_generated_files)',
'--generated-files-dst',
'<@(_generated_files_dst)',
'--defs',
'<@(_defs)',
],
},
], # actions
}, # end of target 'remoting_me2me_host_archive'
], # end of 'targets'
}], # 'OS=="mac"'
['OS=="win"', {
'targets': [
{
'target_name': 'remoting_elevated_controller',
'type': 'static_library',
'sources': [
'host/elevated_controller.idl',
'<(SHARED_INTERMEDIATE_DIR)/remoting/host/elevated_controller.h',
'<(SHARED_INTERMEDIATE_DIR)/remoting/host/elevated_controller_i.c',
],
# This target exports a hard dependency because dependent targets may
# include elevated_controller.h, a generated header.
'hard_dependency': 1,
'msvs_settings': {
'VCMIDLTool': {
'OutputDirectory': '<(SHARED_INTERMEDIATE_DIR)/remoting/host',
},
},
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)',
],
},
}, # end of target 'remoting_elevated_controller'
{
'target_name': 'remoting_host_controller',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'defines' : [
'_ATL_APARTMENT_THREADED',
'_ATL_NO_AUTOMATIC_NAMESPACE',
'_ATL_CSTRING_EXPLICIT_CONSTRUCTORS',
'STRICT',
],
'include_dirs': [
'<(INTERMEDIATE_DIR)',
],
'dependencies': [
'../base/base.gyp:base',
'remoting_elevated_controller',
'remoting_protocol',
'remoting_version_resources',
],
'sources': [
'host/branding.cc',
'host/branding.h',
'host/daemon_controller_common_win.cc',
'host/daemon_controller_common_win.h',
'host/elevated_controller.rc',
'host/elevated_controller_module_win.cc',
'host/elevated_controller_win.cc',
'host/elevated_controller_win.h',
'host/pin_hash.cc',
'host/pin_hash.h',
'host/verify_config_window_win.cc',
'host/verify_config_window_win.h',
'<(SHARED_INTERMEDIATE_DIR)/remoting/elevated_controller_version.rc'
],
'link_settings': {
'libraries': [
'-lcomctl32.lib',
],
},
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
"/MANIFESTUAC:level='requireAdministrator'",
"\"/manifestdependency:type='win32' "
"name='Microsoft.Windows.Common-Controls' "
"version='6.0.0.0' "
"processorArchitecture='*' "
"publicKeyToken='6595b64144ccf1df' language='*'\"",
],
# 2 == /SUBSYSTEM:WINDOWS
'SubSystem': '2',
},
},
}, # end of target 'remoting_host_controller'
{
'target_name': 'remoting_service',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_static',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../ipc/ipc.gyp:ipc',
'remoting_version_resources',
],
'sources': [
'base/scoped_sc_handle_win.h',
'host/branding.cc',
'host/branding.h',
'host/chromoting_messages.cc',
'host/chromoting_messages.h',
'host/constants.h',
'host/host_service.rc',
'host/host_service_resource.h',
'host/host_service_win.cc',
'host/host_service_win.h',
'host/sas_injector.h',
'host/sas_injector_win.cc',
'host/wts_console_monitor_win.h',
'host/wts_console_observer_win.h',
'host/wts_session_process_launcher_win.cc',
'host/wts_session_process_launcher_win.h',
'<(SHARED_INTERMEDIATE_DIR)/remoting/host_service_version.rc'
],
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'wtsapi32.lib',
],
},
},
}, # end of target 'remoting_service'
# Generates the version information resources for the Windows binaries.
# The .RC files are generated from the "version.rc.version" template and
# placed in the "<(SHARED_INTERMEDIATE_DIR)/remoting" folder.
# The substitution strings are taken from:
# - build/util/LASTCHANGE - the last source code revision.
# - chrome/VERSION - the build & patch versions.
# - remoting/VERSION - the major & minor versions.
# - xxx_branding - UI/localizable strings.
# - xxx.ver - per-binary non-localizable strings such as the binary
# name.
{
'target_name': 'remoting_version_resources',
'type': 'none',
'dependencies': [
'../build/util/build_util.gyp:lastchange#target',
],
'inputs': [
'chromium_branding',
'google_chrome_branding',
'version.rc.version',
'<(DEPTH)/build/util/LASTCHANGE',
'<(version_path)',
'<(chrome_version_path)',
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/remoting',
],
},
'sources': [
'host/elevated_controller.ver',
'host/host_service.ver',
'host/plugin/host_plugin.ver',
'host/remoting_me2me_host.ver',
],
'rules': [
{
'rule_name': 'version',
'extension': 'ver',
'variables': {
'lastchange_path': '<(DEPTH)/build/util/LASTCHANGE',
'template_input_path': 'version.rc.version',
},
'conditions': [
['branding == "Chrome"', {
'variables': {
'branding_path': 'google_chrome_branding',
},
}, { # else branding!="Chrome"
'variables': {
'branding_path': 'chromium_branding',
},
}],
],
'inputs': [
'<(template_input_path)',
'<(version_path)',
'<(chrome_version_path)',
'<(branding_path)',
'<(lastchange_path)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/remoting/<(RULE_INPUT_ROOT)_version.rc',
],
'action': [
'python',
'<(version_py_path)',
'-f', '<(RULE_INPUT_PATH)',
'-f', '<(chrome_version_path)',
'-f', '<(version_path)',
'-f', '<(branding_path)',
'-f', '<(lastchange_path)',
'<(template_input_path)',
'<@(_outputs)',
],
'message': 'Generating version information in <@(_outputs)'
},
],
}, # end of target 'remoting_version_resources'
], # end of 'targets'
}], # 'OS=="win"'
# The host installation is generated only if WiX is available and when
    # building a non-component build. WiX does not provide an easy way to
# include all DLLs imported by the installed binaries, so supporting
# the component build becomes a burden.
['OS == "win" and component != "shared_library" and wix_exists == "True" \
and platformsdk_exists == "True"', {
'targets': [
{
'target_name': 'remoting_host_installation',
'type': 'none',
'dependencies': [
'remoting_host_controller',
'remoting_service',
'remoting_me2me_host',
],
'sources': [
'host/installer/chromoting.wxs',
],
'outputs': [
'<(PRODUCT_DIR)/chromoting.msi',
],
'wix_defines' : [
'"-dBranding=<(branding)"',
],
'conditions': [
['buildtype == "Official"', {
'wix_defines': [
'-dOfficialBuild=1',
],
}],
],
'rules': [
{
'rule_name': 'candle',
'extension': 'wxs',
'inputs': [
'<(PRODUCT_DIR)/remoting_host_controller.exe',
'<(PRODUCT_DIR)/remoting_me2me_host.exe',
'<(PRODUCT_DIR)/remoting_service.exe',
'<(platformsdk_path)/redist/x86/sas.dll',
],
'outputs': [
'<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).wixobj',
],
'process_outputs_as_sources': 1,
'msvs_cygwin_shell': 0,
'msvs_quote_cmd': 0,
'action': [
'"<(wix_path)\\candle"',
'-ext "<(wix_path)\\WixFirewallExtension.dll"',
'-ext "<(wix_path)\\WixUIExtension.dll"',
'-ext "<(wix_path)\\WixUtilExtension.dll"',
'-dVersion=<(version_full)',
'"-dFileSource=<(PRODUCT_DIR)."',
'"-dSasDllPath=<(platformsdk_path)/redist/x86/sas.dll"',
'<@(_wix_defines)',
'-out <@(_outputs)',
'"<(RULE_INPUT_PATH)"',
],
'message': 'Generating <@(_outputs)',
},
{
'rule_name': 'light',
'extension': 'wixobj',
'inputs': [
'<(PRODUCT_DIR)/remoting_host_controller.exe',
'<(PRODUCT_DIR)/remoting_me2me_host.exe',
'<(PRODUCT_DIR)/remoting_service.exe',
'<(platformsdk_path)/redist/x86/sas.dll',
],
'outputs': [
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi',
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).wixpdb',
],
'msvs_cygwin_shell': 0,
'msvs_quote_cmd': 0,
'action': [
'"<(wix_path)\\light"',
'-ext "<(wix_path)\\WixFirewallExtension.dll"',
'-ext "<(wix_path)\\WixUIExtension.dll"',
'-ext "<(wix_path)\\WixUtilExtension.dll"',
'-cultures:en-us',
'-sw1076',
'-dVersion=<(version_full)',
'"-dFileSource=<(PRODUCT_DIR)."',
'"-dSasDllPath=<(platformsdk_path)/redist/x86/sas.dll"',
'<@(_wix_defines)',
'-out "<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi"',
'"<(RULE_INPUT_PATH)"',
],
'message': 'Generating <(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi',
},
],
}, # end of target 'remoting_host_installation'
# The 'remoting_host_installation_unittest' target is used to make sure
# that the code signing job (running outside of Chromium tree) will be
# able to unpack and re-assemble the installation successfully.
#
# *** If this target fails to compile the code signing job will fail
# too, breaking the official build. ***
#
# N.B. The command lines passed to the WiX tools here should be in sync
# with the code signing script.
{
'target_name': 'remoting_host_installation_unittest',
'type': 'none',
'dependencies': [
'remoting_host_installation',
],
'sources': [
'<(PRODUCT_DIR)/chromoting.msi',
],
'outputs': [
'<(INTERMEDIATE_DIR)/chromoting-test.msi',
],
'rules': [
{
'rule_name': 'dark',
'extension': 'msi',
'outputs': [
'<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).wxs',
],
'process_outputs_as_sources': 1,
'msvs_cygwin_shell': 0,
'msvs_quote_cmd': 0,
'action': [
'"<(wix_path)\\dark"',
'"<(RULE_INPUT_PATH)"',
                '-o <@(_outputs)',
'-x <(INTERMEDIATE_DIR)',
],
'message': 'Dark: unpacking <(RULE_INPUT_PATH)',
},
{
'rule_name': 'candle',
'extension': 'wxs',
'outputs': [
'<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).wixobj',
],
'process_outputs_as_sources': 1,
'msvs_cygwin_shell': 0,
'msvs_quote_cmd': 0,
'action': [
'"<(wix_path)\\candle"',
'"<(RULE_INPUT_PATH)"',
'-o <@(_outputs)',
'-ext "<(wix_path)\\WixFirewallExtension.dll"',
],
'message': 'Candle: compiling <(RULE_INPUT_PATH)',
},
{
'rule_name': 'light',
'extension': 'wixobj',
'outputs': [
'<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT)-test.msi',
],
'msvs_cygwin_shell': 0,
'msvs_quote_cmd': 0,
'action': [
'"<(wix_path)\\light"',
'"<(RULE_INPUT_PATH)"',
'-o <@(_outputs)',
'-ext "<(wix_path)\\WixFirewallExtension.dll"',
'-sw1076',
],
'message': 'Light: linking <(RULE_INPUT_PATH)',
},
],
}, # end of target 'remoting_host_installation_unittest'
], # end of 'targets'
}], # '<(wix_path) != ""'
], # end of 'conditions'
'targets': [
{
'target_name': 'remoting_client_plugin',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'defines': [
'HAVE_STDINT_H', # Required by on2_integer.h
],
'dependencies': [
'remoting_base',
'remoting_client',
'remoting_jingle_glue',
'../media/media.gyp:media',
'../ppapi/ppapi.gyp:ppapi_cpp_objects',
'../skia/skia.gyp:skia',
],
'sources': [
'client/plugin/chromoting_instance.cc',
'client/plugin/chromoting_instance.h',
'client/plugin/chromoting_scriptable_object.cc',
'client/plugin/chromoting_scriptable_object.h',
'client/plugin/pepper_entrypoints.cc',
'client/plugin/pepper_entrypoints.h',
'client/plugin/pepper_input_handler.cc',
'client/plugin/pepper_input_handler.h',
'client/plugin/pepper_network_manager.cc',
'client/plugin/pepper_network_manager.h',
'client/plugin/pepper_packet_socket_factory.cc',
'client/plugin/pepper_packet_socket_factory.h',
'client/plugin/pepper_plugin_thread_delegate.cc',
'client/plugin/pepper_plugin_thread_delegate.h',
'client/plugin/pepper_view.cc',
'client/plugin/pepper_view.h',
'client/plugin/pepper_util.cc',
'client/plugin/pepper_util.h',
'client/plugin/pepper_xmpp_proxy.cc',
'client/plugin/pepper_xmpp_proxy.h',
],
}, # end of target 'remoting_client_plugin'
{
'target_name': 'remoting_host_plugin',
'type': 'loadable_module',
'variables': { 'enable_wexit_time_destructors': 1, },
'product_extension': '<(host_plugin_extension)',
'product_prefix': '<(host_plugin_prefix)',
'dependencies': [
'remoting_base',
'remoting_host',
'remoting_jingle_glue',
'../third_party/npapi/npapi.gyp:npapi',
],
'sources': [
'host/branding.cc',
'host/branding.h',
'host/it2me_host_user_interface.cc',
'host/it2me_host_user_interface.h',
'host/plugin/daemon_controller.h',
'host/daemon_controller_common_win.cc',
'host/daemon_controller_common_win.h',
'host/plugin/daemon_controller_linux.cc',
'host/plugin/daemon_controller_mac.cc',
'host/plugin/daemon_controller_win.cc',
'host/plugin/daemon_installer_win.cc',
'host/plugin/daemon_installer_win.h',
'host/plugin/host_log_handler.cc',
'host/plugin/host_log_handler.h',
'host/plugin/host_plugin.cc',
'host/plugin/host_plugin_resource.h',
'host/plugin/host_plugin_utils.cc',
'host/plugin/host_plugin_utils.h',
'host/plugin/host_script_object.cc',
'host/plugin/host_script_object.h',
],
'conditions': [
['OS=="mac"', {
'mac_bundle': 1,
'xcode_settings': {
'CHROMIUM_BUNDLE_ID': '<(mac_bundle_id)',
'INFOPLIST_FILE': 'host/plugin/host_plugin-Info.plist',
'INFOPLIST_PREPROCESS': 'YES',
# TODO(maruel): Use INFOPLIST_PREFIX_HEADER to remove the need to
# duplicate string once
# http://code.google.com/p/gyp/issues/detail?id=243 is fixed.
'INFOPLIST_PREPROCESSOR_DEFINITIONS': 'HOST_PLUGIN_MIME_TYPE="<(host_plugin_mime_type)" HOST_PLUGIN_NAME="<(host_plugin_name)" HOST_PLUGIN_DESCRIPTION="<(host_plugin_description)"',
},
# TODO(mark): Come up with a fancier way to do this. It should
# only be necessary to list host_plugin-Info.plist once, not the
# three times it is listed here.
'mac_bundle_resources': [
'host/disconnect_window.xib',
'host/plugin/host_plugin-Info.plist',
'resources/chromoting16.png',
'resources/chromoting48.png',
'resources/chromoting128.png',
],
'mac_bundle_resources!': [
'host/plugin/host_plugin-Info.plist',
],
'conditions': [
['mac_breakpad==1', {
'variables': {
# A real .dSYM is needed for dump_syms to operate on.
'mac_real_dsym': 1,
},
}],
], # conditions
}], # OS=="mac"
[ 'OS=="win"', {
'dependencies': [
'../google_update/google_update.gyp:google_update',
'../ipc/ipc.gyp:ipc',
'remoting_elevated_controller',
'remoting_version_resources',
],
'include_dirs': [
'<(INTERMEDIATE_DIR)',
],
'sources': [
'host/plugin/host_plugin.def',
'host/plugin/host_plugin.rc',
'<(SHARED_INTERMEDIATE_DIR)/remoting/host_plugin_version.rc'
],
}],
],
}, # end of target 'remoting_host_plugin'
{
'target_name': 'remoting_webapp',
'type': 'none',
'dependencies': [
'remoting_host_plugin',
],
'sources': [
'webapp/build-webapp.py',
'webapp/verify-webapp.py',
'<(version_path)',
'<(chrome_version_path)',
'<@(remoting_webapp_files)',
'<@(remoting_webapp_locale_files)',
],
# Can't use a 'copies' because we need to manipulate
# the manifest file to get the right plugin name.
# Also we need to move the plugin into the me2mom
# folder, which means 2 copies, and gyp doesn't
# seem to guarantee the ordering of 2 copies statements
# when the actual project is generated.
'actions': [
{
'action_name': 'Verify Remoting WebApp i18n',
'inputs': [
'host/plugin/host_script_object.cc',
'webapp/_locales/en/messages.json',
'webapp/client_screen.js',
'webapp/host_controller.js',
'webapp/host_table_entry.js',
'webapp/host_setup_dialog.js',
'webapp/main.html',
'webapp/manifest.json',
'webapp/remoting.js',
'webapp/verify-webapp.py',
],
'outputs': [
'<(PRODUCT_DIR)/remoting/webapp_verified.stamp',
],
'action': [
'python',
'webapp/verify-webapp.py',
'<(PRODUCT_DIR)/remoting/webapp_verified.stamp',
'webapp/_locales/en/messages.json',
'webapp/client_screen.js',
'webapp/host_controller.js',
'webapp/host_table_entry.js',
'webapp/host_setup_dialog.js',
'webapp/main.html',
'webapp/manifest.json',
'webapp/remoting.js',
'host/plugin/host_script_object.cc',
],
},
{
'action_name': 'Build Remoting WebApp',
'output_dir': '<(PRODUCT_DIR)/remoting/remoting.webapp',
'plugin_path': '<(PRODUCT_DIR)/<(host_plugin_prefix)remoting_host_plugin.<(host_plugin_extension)',
'zip_path': '<(PRODUCT_DIR)/remoting-webapp.zip',
'inputs': [
'webapp/build-webapp.py',
'<(_plugin_path)',
'<(version_path)',
'<(chrome_version_path)',
'<@(remoting_webapp_files)',
'<@(remoting_webapp_locale_files)',
],
'outputs': [
'<(_output_dir)',
'<(_zip_path)',
],
'action': [
'python', 'webapp/build-webapp.py',
'<(buildtype)',
'<(version_full)',
'<(host_plugin_mime_type)',
'<(_output_dir)',
'<(_zip_path)',
'<(_plugin_path)',
'<@(remoting_webapp_files)',
'--locales',
'<@(remoting_webapp_locale_files)',
],
},
],
}, # end of target 'remoting_webapp'
{
'target_name': 'remoting_base',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../ui/ui.gyp:ui',
'../net/net.gyp:net',
'../skia/skia.gyp:skia',
'../third_party/libvpx/libvpx.gyp:libvpx',
'../third_party/protobuf/protobuf.gyp:protobuf_lite',
'../third_party/zlib/zlib.gyp:zlib',
'../media/media.gyp:yuv_convert',
'remoting_jingle_glue',
'proto/chromotocol.gyp:chromotocol_proto_lib',
],
'export_dependent_settings': [
'../base/base.gyp:base',
'../net/net.gyp:net',
'../skia/skia.gyp:skia',
'../third_party/protobuf/protobuf.gyp:protobuf_lite',
'proto/chromotocol.gyp:chromotocol_proto_lib',
],
# This target needs a hard dependency because dependent targets
# depend on chromotocol_proto_lib for headers.
'hard_dependency': 1,
'sources': [
'base/auth_token_util.cc',
'base/auth_token_util.h',
'base/capture_data.cc',
'base/capture_data.h',
'base/compound_buffer.cc',
'base/compound_buffer.h',
'base/compressor.h',
'base/compressor_verbatim.cc',
'base/compressor_verbatim.h',
'base/compressor_zlib.cc',
'base/compressor_zlib.h',
'base/constants.cc',
'base/constants.h',
'base/decoder.h',
'base/decoder_vp8.cc',
'base/decoder_vp8.h',
'base/decoder_row_based.cc',
'base/decoder_row_based.h',
'base/decompressor.h',
'base/decompressor_verbatim.cc',
'base/decompressor_verbatim.h',
'base/decompressor_zlib.cc',
'base/decompressor_zlib.h',
'base/encoder.h',
'base/encoder_vp8.cc',
'base/encoder_vp8.h',
'base/encoder_row_based.cc',
'base/encoder_row_based.h',
'base/plugin_message_loop_proxy.cc',
'base/plugin_message_loop_proxy.h',
'base/rate_counter.cc',
'base/rate_counter.h',
'base/running_average.cc',
'base/running_average.h',
'base/scoped_thread_proxy.cc',
'base/scoped_thread_proxy.h',
'base/util.cc',
'base/util.h',
],
}, # end of target 'remoting_base'
{
'target_name': 'remoting_host',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'remoting_base',
'remoting_jingle_glue',
'remoting_protocol',
'differ_block',
'../crypto/crypto.gyp:crypto',
],
'sources': [
'host/capturer.h',
'host/capturer_helper.cc',
'host/capturer_helper.h',
'host/capturer_fake.cc',
'host/capturer_fake.h',
'host/capturer_linux.cc',
'host/capturer_mac.cc',
'host/capturer_win.cc',
'host/capture_scheduler.cc',
'host/capture_scheduler.h',
'host/chromoting_host.cc',
'host/chromoting_host.h',
'host/chromoting_host_context.cc',
'host/chromoting_host_context.h',
'host/client_session.cc',
'host/client_session.h',
'host/constants.h',
'host/continue_window.h',
'host/continue_window_mac.mm',
'host/continue_window_linux.cc',
'host/continue_window_win.cc',
'host/curtain.h',
'host/curtain_linux.cc',
'host/curtain_mac.cc',
'host/curtain_win.cc',
'host/desktop_environment.cc',
'host/desktop_environment.h',
'host/desktop_win.cc',
'host/desktop_win.h',
'host/differ.h',
'host/differ.cc',
'host/disconnect_window.h',
'host/disconnect_window_linux.cc',
'host/disconnect_window_mac.h',
'host/disconnect_window_mac.mm',
'host/disconnect_window_win.cc',
'host/event_executor.h',
'host/event_executor_linux.cc',
'host/event_executor_mac.cc',
'host/event_executor_win.cc',
'host/heartbeat_sender.cc',
'host/heartbeat_sender.h',
'host/gaia_oauth_client.cc',
'host/gaia_oauth_client.h',
'host/host_config.cc',
'host/host_config.h',
'host/host_key_pair.cc',
'host/host_key_pair.h',
'host/host_port_allocator.cc',
'host/host_port_allocator.h',
'host/host_secret.cc',
'host/host_secret.h',
'host/host_status_observer.h',
'host/in_memory_host_config.cc',
'host/in_memory_host_config.h',
'host/json_host_config.cc',
'host/json_host_config.h',
'host/local_input_monitor.h',
'host/local_input_monitor_linux.cc',
'host/local_input_monitor_mac.mm',
'host/local_input_monitor_thread_linux.cc',
'host/local_input_monitor_thread_linux.h',
'host/local_input_monitor_thread_win.cc',
'host/local_input_monitor_thread_win.h',
'host/local_input_monitor_win.cc',
'host/log_to_server.cc',
'host/log_to_server.h',
'host/network_settings.h',
'host/pin_hash.cc',
'host/pin_hash.h',
'host/policy_hack/nat_policy.h',
'host/policy_hack/nat_policy.cc',
'host/policy_hack/nat_policy_linux.cc',
'host/policy_hack/nat_policy_mac.mm',
'host/policy_hack/nat_policy_win.cc',
'host/register_support_host_request.cc',
'host/register_support_host_request.h',
'host/remote_input_filter.cc',
'host/remote_input_filter.h',
'host/screen_recorder.cc',
'host/screen_recorder.h',
'host/server_log_entry.cc',
'host/server_log_entry.h',
'host/session_event_executor_win.cc',
'host/session_event_executor_win.h',
'host/signaling_connector.cc',
'host/signaling_connector.h',
'host/scoped_thread_desktop_win.cc',
'host/scoped_thread_desktop_win.h',
'host/ui_strings.cc',
'host/ui_strings.h',
'host/url_fetcher.cc',
'host/url_fetcher.h',
'host/url_request_context.cc',
'host/url_request_context.h',
'host/usb_keycode_map.h',
'host/user_authenticator.h',
'host/user_authenticator_linux.cc',
'host/user_authenticator_mac.cc',
'host/user_authenticator_win.cc',
'host/vlog_net_log.cc',
'host/vlog_net_log.h',
],
'conditions': [
['toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
'sources': [
'host/x_server_pixel_buffer.cc',
'host/x_server_pixel_buffer.h',
],
'link_settings': {
'libraries': [
'-lX11',
'-lXdamage',
'-lXfixes',
'-lXtst',
'-lpam',
'-lXext'
],
},
}],
['OS=="mac"', {
'sources': [
'../third_party/GTM/AppKit/GTMCarbonEvent.h',
'../third_party/GTM/AppKit/GTMCarbonEvent.m',
'../third_party/GTM/DebugUtils/GTMDebugSelectorValidation.h',
'../third_party/GTM/DebugUtils/GTMTypeCasting.h',
'../third_party/GTM/Foundation/GTMObjectSingleton.h',
'../third_party/GTM/GTMDefines.h',
],
'include_dirs': [
'../third_party/GTM',
'../third_party/GTM/AppKit',
'../third_party/GTM/DebugUtils',
'../third_party/GTM/Foundation',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/OpenGL.framework',
],
},
}],
['OS=="win"', {
'sources': [
'host/chromoting_messages.cc',
'host/chromoting_messages.h',
],
}],
],
}, # end of target 'remoting_host'
{
'target_name': 'remoting_client',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'remoting_base',
'remoting_jingle_glue',
'remoting_protocol',
],
'sources': [
'client/chromoting_client.cc',
'client/chromoting_client.h',
'client/chromoting_stats.cc',
'client/chromoting_stats.h',
'client/chromoting_view.h',
'client/client_config.cc',
'client/client_config.h',
'client/client_context.cc',
'client/client_context.h',
'client/frame_consumer.h',
'client/frame_consumer_proxy.cc',
'client/frame_consumer_proxy.h',
'client/frame_producer.h',
'client/key_event_mapper.cc',
'client/key_event_mapper.h',
'client/rectangle_update_decoder.cc',
'client/rectangle_update_decoder.h',
],
}, # end of target 'remoting_client'
{
'target_name': 'remoting_simple_host',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'remoting_base',
'remoting_host',
'remoting_jingle_glue',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../media/media.gyp:media',
],
'sources': [
'host/continue_window.h',
'host/continue_window_mac.mm',
'host/continue_window_linux.cc',
'host/continue_window_win.cc',
'host/disconnect_window_linux.cc',
'host/disconnect_window_mac.h',
'host/disconnect_window_mac.mm',
'host/disconnect_window_win.cc',
'host/it2me_host_user_interface.cc',
'host/it2me_host_user_interface.h',
'host/simple_host_process.cc',
],
'conditions': [
['OS=="win"', {
'dependencies': [
'../ipc/ipc.gyp:ipc'
],
}],
],
}, # end of target 'remoting_simple_host'
{
'target_name': 'remoting_me2me_host',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'remoting_base',
'remoting_host',
'remoting_jingle_glue',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../media/media.gyp:media',
],
'sources': [
'host/branding.cc',
'host/branding.h',
'host/host_event_logger.h',
'host/sighup_listener_mac.cc',
'host/sighup_listener_mac.h',
'host/remoting_me2me_host.cc',
],
'conditions': [
['os_posix==1', {
'sources': [
'host/host_event_logger_posix.cc',
],
}],
['OS=="mac"', {
'sources': [
'host/remoting_me2me_host-Info.plist',
],
'conditions': [
['branding == "Chrome"', {
'variables': {
'host_bundle_id': 'com.google.chrome_remote_desktop.remoting_me2me_host',
},
}, { # else branding!="Chrome"
'variables': {
'host_bundle_id': 'org.chromium.chromoting.remoting_me2me_host',
},
}],
],
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-sectcreate,__TEXT,__info_plist,<(INTERMEDIATE_DIR)/remoting_me2me_host-Info.plist'
],
},
'rules': [
{
'rule_name': 'brand_mac',
'extension': 'plist',
'inputs': [ ],
'outputs': [
'<(INTERMEDIATE_DIR)/remoting_me2me_host-Info.plist',
],
'action': [
'python', '<(version_py_path)',
'-i', 'host/remoting_me2me_host-Info.plist',
'-o', '<(INTERMEDIATE_DIR)/remoting_me2me_host-Info.plist',
'-e', 'VERSION_FULL="<(version_full)"',
'-e', 'VERSION_SHORT="<(version_short)"',
'-e', 'BUNDLE_ID="<(host_bundle_id)"',
],
'process_outputs_as_sources': 1,
'message': 'Branding and versioning remoting_me2me_host.',
},
],
}],
['OS=="win"', {
'dependencies': [
'../ipc/ipc.gyp:ipc',
'remoting_version_resources',
],
'sources': [
'host/host_event_logger_win.cc',
'host/remoting_host_messages.mc',
'<(SHARED_INTERMEDIATE_DIR)/remoting/remoting_me2me_host_version.rc'
],
'include_dirs': [
'<(INTERMEDIATE_DIR)',
],
# Rule to run the message compiler.
'rules': [
{
'rule_name': 'message_compiler',
'extension': 'mc',
'inputs': [ ],
'outputs': [
'<(INTERMEDIATE_DIR)/remoting_host_messages.h',
'<(INTERMEDIATE_DIR)/remoting_host_messages.rc',
],
'msvs_cygwin_shell': 0,
'msvs_quote_cmd': 0,
'action': [
'mc.exe -h <(INTERMEDIATE_DIR) -r <(INTERMEDIATE_DIR) <(RULE_INPUT_PATH)',
],
'process_outputs_as_sources': 1,
'message': 'Running message compiler on <(RULE_INPUT_PATH).',
},
],
'msvs_settings': {
'VCLinkerTool': {
# 2 == /SUBSYSTEM:WINDOWS
'SubSystem': '2',
},
},
}],
], # end of 'conditions'
}, # end of target 'remoting_me2me_host'
{
'target_name': 'remoting_host_keygen',
'type': 'executable',
'dependencies': [
'remoting_base',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../crypto/crypto.gyp:crypto',
],
'sources': [
'host/keygen_main.cc',
],
}, # end of target 'remoting_host_keygen'
{
'target_name': 'remoting_jingle_glue',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../jingle/jingle.gyp:jingle_glue',
'../jingle/jingle.gyp:notifier',
'../third_party/libjingle/libjingle.gyp:libjingle',
'../third_party/libjingle/libjingle.gyp:libjingle_p2p',
],
'export_dependent_settings': [
'../third_party/libjingle/libjingle.gyp:libjingle',
'../third_party/libjingle/libjingle.gyp:libjingle_p2p',
],
'sources': [
'jingle_glue/iq_sender.cc',
'jingle_glue/iq_sender.h',
'jingle_glue/javascript_signal_strategy.cc',
'jingle_glue/javascript_signal_strategy.h',
'jingle_glue/jingle_info_request.cc',
'jingle_glue/jingle_info_request.h',
'jingle_glue/jingle_thread.cc',
'jingle_glue/jingle_thread.h',
'jingle_glue/signal_strategy.h',
'jingle_glue/ssl_adapter.h',
'jingle_glue/ssl_adapter.cc',
'jingle_glue/ssl_socket_adapter.cc',
'jingle_glue/ssl_socket_adapter.h',
'jingle_glue/xmpp_proxy.h',
'jingle_glue/xmpp_signal_strategy.cc',
'jingle_glue/xmpp_signal_strategy.h',
'jingle_glue/xmpp_socket_adapter.cc',
'jingle_glue/xmpp_socket_adapter.h',
],
}, # end of target 'remoting_jingle_glue'
{
'target_name': 'remoting_protocol',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'remoting_base',
'remoting_jingle_glue',
'../crypto/crypto.gyp:crypto',
'../jingle/jingle.gyp:jingle_glue',
'../net/net.gyp:net',
],
'export_dependent_settings': [
'remoting_jingle_glue',
],
'sources': [
'protocol/auth_util.cc',
'protocol/auth_util.h',
'protocol/authentication_method.cc',
'protocol/authentication_method.h',
'protocol/authenticator.cc',
'protocol/authenticator.h',
'protocol/buffered_socket_writer.cc',
'protocol/buffered_socket_writer.h',
'protocol/channel_authenticator.h',
'protocol/channel_dispatcher_base.cc',
'protocol/channel_dispatcher_base.h',
'protocol/client_control_dispatcher.cc',
'protocol/client_control_dispatcher.h',
'protocol/client_event_dispatcher.cc',
'protocol/client_event_dispatcher.h',
'protocol/client_stub.h',
'protocol/clipboard_filter.h',
'protocol/clipboard_filter.cc',
'protocol/clipboard_stub.h',
'protocol/connection_to_client.cc',
'protocol/connection_to_client.h',
'protocol/connection_to_host.cc',
'protocol/connection_to_host.h',
'protocol/content_description.cc',
'protocol/content_description.h',
'protocol/errors.h',
'protocol/host_control_dispatcher.cc',
'protocol/host_control_dispatcher.h',
'protocol/host_event_dispatcher.cc',
'protocol/host_event_dispatcher.h',
'protocol/host_event_stub.h',
'protocol/host_stub.h',
'protocol/input_event_tracker.cc',
'protocol/input_event_tracker.h',
'protocol/input_filter.cc',
'protocol/input_filter.h',
'protocol/input_stub.h',
'protocol/it2me_host_authenticator_factory.cc',
'protocol/it2me_host_authenticator_factory.h',
'protocol/jingle_messages.cc',
'protocol/jingle_messages.h',
'protocol/jingle_session.cc',
'protocol/jingle_session.h',
'protocol/jingle_session_manager.cc',
'protocol/jingle_session_manager.h',
'protocol/libjingle_transport_factory.cc',
'protocol/libjingle_transport_factory.h',
'protocol/me2me_host_authenticator_factory.cc',
'protocol/me2me_host_authenticator_factory.h',
'protocol/message_decoder.cc',
'protocol/message_decoder.h',
'protocol/message_reader.cc',
'protocol/message_reader.h',
'protocol/mouse_input_filter.cc',
'protocol/mouse_input_filter.h',
'protocol/negotiating_authenticator.cc',
'protocol/negotiating_authenticator.h',
'protocol/pepper_transport_factory.cc',
'protocol/pepper_transport_factory.h',
'protocol/pepper_transport_socket_adapter.cc',
'protocol/pepper_transport_socket_adapter.h',
'protocol/protobuf_video_reader.cc',
'protocol/protobuf_video_reader.h',
'protocol/protobuf_video_writer.cc',
'protocol/protobuf_video_writer.h',
'protocol/rtcp_writer.cc',
'protocol/rtcp_writer.h',
'protocol/rtp_reader.cc',
'protocol/rtp_reader.h',
'protocol/rtp_utils.cc',
'protocol/rtp_utils.h',
'protocol/rtp_video_reader.cc',
'protocol/rtp_video_reader.h',
'protocol/rtp_video_writer.cc',
'protocol/rtp_video_writer.h',
'protocol/rtp_writer.cc',
'protocol/rtp_writer.h',
'protocol/session.h',
'protocol/session_config.cc',
'protocol/session_config.h',
'protocol/session_manager.h',
'protocol/socket_reader_base.cc',
'protocol/socket_reader_base.h',
'protocol/ssl_hmac_channel_authenticator.cc',
'protocol/ssl_hmac_channel_authenticator.h',
'protocol/transport.cc',
'protocol/transport.h',
'protocol/transport_config.cc',
'protocol/transport_config.h',
'protocol/util.cc',
'protocol/util.h',
'protocol/v1_authenticator.cc',
'protocol/v1_authenticator.h',
'protocol/v2_authenticator.cc',
'protocol/v2_authenticator.h',
'protocol/video_reader.cc',
'protocol/video_reader.h',
'protocol/video_stub.h',
'protocol/video_writer.cc',
'protocol/video_writer.h',
],
}, # end of target 'remoting_protocol'
{
'target_name': 'differ_block',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../media/media.gyp:cpu_features',
],
'conditions': [
[ 'target_arch == "ia32" or target_arch == "x64"', {
'dependencies': [
'differ_block_sse2',
],
}],
],
'sources': [
'host/differ_block.cc',
'host/differ_block.h',
],
}, # end of target differ_block
{
'target_name': 'differ_block_sse2',
'type': 'static_library',
'conditions': [
[ 'os_posix == 1 and OS != "mac"', {
'cflags': [
'-msse2',
],
}],
],
'sources': [
'host/differ_block_sse2.cc',
],
}, # end of target differ_block_sse2
# Remoting unit tests
{
'target_name': 'remoting_unittests',
'type': 'executable',
'dependencies': [
'remoting_base',
'remoting_client',
'remoting_host',
'remoting_jingle_glue',
'remoting_protocol',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
'../media/media.gyp:media',
'../net/net.gyp:net_test_support',
'../ppapi/ppapi.gyp:ppapi_cpp',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../ui/ui.gyp:ui',
],
'include_dirs': [
'../testing/gmock/include',
],
'sources': [
'base/auth_token_util_unittest.cc',
'base/codec_test.cc',
'base/codec_test.h',
'base/compound_buffer_unittest.cc',
'base/compressor_zlib_unittest.cc',
'base/decoder_vp8_unittest.cc',
'base/decompressor_zlib_unittest.cc',
'base/encode_decode_unittest.cc',
'base/encoder_vp8_unittest.cc',
'base/encoder_row_based_unittest.cc',
'base/base_mock_objects.cc',
'base/base_mock_objects.h',
'base/util_unittest.cc',
'client/key_event_mapper_unittest.cc',
'host/capturer_helper_unittest.cc',
'host/capturer_linux_unittest.cc',
'host/capturer_mac_unittest.cc',
'host/capturer_win_unittest.cc',
'host/chromoting_host_context_unittest.cc',
'host/chromoting_host_unittest.cc',
'host/client_session_unittest.cc',
'host/differ_block_unittest.cc',
'host/differ_unittest.cc',
'host/heartbeat_sender_unittest.cc',
'host/host_key_pair_unittest.cc',
'host/host_mock_objects.cc',
'host/host_mock_objects.h',
'host/it2me_host_user_interface.cc',
'host/it2me_host_user_interface.h',
'host/json_host_config_unittest.cc',
'host/log_to_server_unittest.cc',
'host/pin_hash_unittest.cc',
'host/register_support_host_request_unittest.cc',
'host/remote_input_filter_unittest.cc',
'host/screen_recorder_unittest.cc',
'host/server_log_entry_unittest.cc',
'host/test_key_pair.h',
'host/url_fetcher_unittest.cc',
'jingle_glue/fake_signal_strategy.cc',
'jingle_glue/fake_signal_strategy.h',
'jingle_glue/iq_sender_unittest.cc',
'jingle_glue/jingle_thread_unittest.cc',
'jingle_glue/mock_objects.cc',
'jingle_glue/mock_objects.h',
'protocol/authenticator_test_base.cc',
'protocol/authenticator_test_base.h',
'protocol/connection_tester.cc',
'protocol/connection_tester.h',
'protocol/connection_to_client_unittest.cc',
'protocol/fake_authenticator.cc',
'protocol/fake_authenticator.h',
'protocol/fake_session.cc',
'protocol/fake_session.h',
'protocol/jingle_messages_unittest.cc',
'protocol/jingle_session_unittest.cc',
'protocol/input_event_tracker_unittest.cc',
'protocol/message_decoder_unittest.cc',
'protocol/message_reader_unittest.cc',
'protocol/mouse_input_filter_unittest.cc',
'protocol/negotiating_authenticator_unittest.cc',
'protocol/protocol_mock_objects.cc',
'protocol/protocol_mock_objects.h',
'protocol/ppapi_module_stub.cc',
'protocol/rtp_video_reader_unittest.cc',
'protocol/rtp_video_writer_unittest.cc',
'protocol/ssl_hmac_channel_authenticator_unittest.cc',
'protocol/v1_authenticator_unittest.cc',
'protocol/v2_authenticator_unittest.cc',
'run_all_unittests.cc',
],
'conditions': [
[ 'OS=="win"', {
'dependencies': [
'../ipc/ipc.gyp:ipc'
],
}],
['chromeos != 0', {
'dependencies!': [
'remoting_host',
],
'sources/': [
['exclude', 'host/*'],
]
}],
['toolkit_uses_gtk == 1', {
'dependencies': [
# Needed for the following #include chain:
# base/run_all_unittests.cc
# ../base/test_suite.h
# gtk/gtk.h
'../build/linux/system.gyp:gtk',
'../build/linux/system.gyp:ssl',
],
'conditions': [
[ 'linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
},
],
],
}],
], # end of 'conditions'
}, # end of target 'remoting_unittests'
], # end of targets
}
|
py | b4062b2bf028b852289e1324805dc5b4697df862 | from functools import wraps
from flask_restx import abort
from flask import request
from settings import MAX_RETRY_COUNT
import logging
def task_request():
def decorator(func):
@wraps(func)
def middleware(*args, **kwargs):
task_name = request.headers.get("X-Cloudtasks-Taskname")
if not task_name:
abort(403, message='Access denied')
task_execution_count = int(request.headers.get(
"X-CloudTasks-TaskExecutionCount"))
if task_execution_count > MAX_RETRY_COUNT:
logging.error(f'Task getting suspended {task_name}')
return 'done'
return func(*args, **kwargs)
return middleware
return decorator
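# Hypothetical usage sketch (the resource below is an assumption, not part of
# this module): the guard wraps a Cloud Tasks push handler so that requests
# without the X-Cloudtasks-Taskname header get a 403 and tasks retried more
# than MAX_RETRY_COUNT times are acknowledged with 'done' instead of rerun.
from flask_restx import Resource
class ProcessExportTask(Resource):  # hypothetical resource name
    @task_request()
    def post(self):
        # ... perform the work pushed by Cloud Tasks here ...
        return 'done'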
|
py | b4062bdc08139240d1806d4915877e1c94b70045 | from unittest import TestCase
from unittest.mock import patch
from better_id3.__main__ import CLI
class TestMain(TestCase):
def setUp(self) -> None:
self.cli = CLI()
self.config_path = "config_path"
def test_format(self):
self.assert_command_run(self.cli.clean, "clean")
def assert_command_run(self, cli_fn, command_name):
with patch("better_id3.__main__.load_config_from_file") as mock_load_config:
cli_fn(self.config_path)
mock_load_config.assert_called_with(self.config_path)
mock_load_config.return_value[command_name].run.assert_called()
|
py | b4062c6f987f336f4d5bd5c141ea5e1c52a4da7f | from django.contrib import admin
from recipes import models
admin.site.register(models.Ingredient)
admin.site.register(models.Recipe)
|
py | b4062e0558be75d0b55a4884e9abb7a1391a2463 | from fastapi import APIRouter, Request, status, Depends
from fastapi.openapi.models import APIKey
from fastapi.responses import JSONResponse
from commands import commands
from router.key import validate_api_key
# Defining our API router
def get_router(app):
# Create a FastAPI router
router = APIRouter()
# Creates a persistent storage directory in the recommended storage path
@router.post("/storage/create", response_description="Creating storage directories")
async def create_storage(request: Request, api_key: APIKey = Depends(validate_api_key)):
body_parsed = await request.json()
success = commands.storage_create(volume_name=body_parsed["name"])
content = {"success": success}
return JSONResponse(status_code=status.HTTP_200_OK, content=content)
# Mount a Storage
@router.post("/storage/{app_name}/mount", response_description="Mount a Storage")
async def mount_storage(request: Request, app_name: str, api_key: APIKey = Depends(validate_api_key)):
body_parsed = await request.json()
success = commands.storage_mount(app_name=app_name, mount_point_left=body_parsed["mount_point_left"],
mount_point_right=body_parsed["mount_point_right"])
content = {"success": success}
return JSONResponse(status_code=status.HTTP_200_OK, content=content)
# We return our router
return router
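# Hypothetical wiring sketch: how this router factory is typically mounted on
# the main application. The app construction below is an assumption and sits
# behind a __main__ guard so it cannot run on a normal import.
if __name__ == "__main__":
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(get_router(app))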
|
py | b4062eb8472f3ab4bfe2f2180f58c197df6000ce | import os
import csv
import datetime as dt
from io import BytesIO, StringIO
from odm2_postgres_api.schemas import schemas
class UploadBlobWrapper:
def __init__(self):
self._uploader = None
def __call__(self, *args, **kwargs):
if self._uploader is None:
self.set_uploader()
self._uploader(*args, **kwargs)
def set_uploader(self):
environment = os.environ.get("NIVA_ENVIRONMENT")
if environment in ["dev", "master"]:
from gcloud_common_utils.blob_helper import upload_blob
elif environment == "localdev":
from gcloud_common_utils.blob_helper_local import upload_blob
else:
raise ValueError(f"NIVA_ENVIRONMENT set to {environment} please use: 'localdev', 'dev' or 'master'")
self._uploader = upload_blob
upload_blob_wrapper = UploadBlobWrapper()
def generate_csv_from_form(begroing_result: schemas.BegroingResultCreate):
date_string = (begroing_result.date + dt.timedelta(hours=6)).date().strftime("%d-%m-%Y %H:%M:%S") # round date
# date_string = begroing_result.date.strftime('%d-%m-%Y %H:%M:%S')
csv_rows = []
for index, species in enumerate(begroing_result.taxons):
used_method_indices = [i for i, e in enumerate(begroing_result.observations[index]) if e]
if len(used_method_indices) != 1:
raise ValueError("Must have one and only one method per species")
used_method_index = used_method_indices[0]
method = begroing_result.methods[used_method_index].methodname
value = begroing_result.observations[index][used_method_index]
data_row = {
"Prosjektnavn": "&&".join([e.directivedescription for e in begroing_result.projects]),
"lok_sta": begroing_result.station.samplingfeaturename.split(",")[0],
"dato": date_string,
"rubin_kode": species["taxonomicclassifiercommonname"].split(",")[0],
"mengderef": "% dekning",
}
if method == "Microscopic abundance":
data_row["Mengde_tall"] = ""
data_row["Flagg"] = ""
data_row["Mengde_tekst"] = value
elif method == "Macroscopic coverage":
# if begroing_result.observations[index][method_index] == '<1'
data_row["Mengde_tall"] = 1 if value == "<1" else value
data_row["Flagg"] = "<" if value == "<1" else ""
data_row["Mengde_tekst"] = ""
else:
raise ValueError("Currently only methods 'Microscopic abundance' and 'Macroscopic coverage' are allowed")
csv_rows.append(data_row)
return csv_rows
def put_csv_to_bucket(csv_data):
bucket_name = os.environ["DATA_UPLOAD_BUCKET"]
file_name = f"begroing/{dt.datetime.now().isoformat()}_data.csv"
with StringIO() as csv_file:
fieldnames = list(csv_data[0].keys())
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for row in csv_data:
writer.writerow(row)
content = csv_file.getvalue().encode("utf-8")
with BytesIO(initial_bytes=content) as csv_file:
upload_blob_wrapper(bucket_name, file_name, csv_file)
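# Hypothetical end-to-end sketch showing how the two helpers above are meant
# to be combined; constructing the BegroingResultCreate instance is omitted
# because its fields are defined in the schemas module, not here.
def export_begroing_result(begroing_result: schemas.BegroingResultCreate) -> None:
    csv_rows = generate_csv_from_form(begroing_result)
    if csv_rows:
        put_csv_to_bucket(csv_rows)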
|
py | b4062efa3e36bc278fc7f9e8672a92f1aeffa4bf | '''
Feeds
=====
The following methods allow for interaction into the Tenable.sc
:sc-api:`Feed <Feed.html>` API.
Methods available on ``sc.feeds``:
.. rst-class:: hide-signature
.. autoclass:: FeedAPI
:members:
'''
from .base import SCEndpoint
class FeedAPI(SCEndpoint):
def status(self, feed_type=None):
'''
Returns the status of either a specific feed type (if requested) or all
of the feed types if nothing is specifically asked.
:sc-api:`feed <Feed.html>`
:sc-api:`feed: feed-type <Feed.html#FeedRESTReference-FeedGETType>`
Args:
feed_type (str, optional):
The feed type to specifically return. Valid types are `active`,
`passive`, `lce`, `sc`, or `all`.
Returns:
:obj:`dict`:
If no specific feed type is specified, then a dictionary with
each type listed with a sub-dictionary detailing the status is
returned. If a specific feed type is requested, then only the
status information for that feed type is returned.
Examples:
Getting all of the feed types returned:
>>> status = sc.feed.status()
Getting the feed status for a specific type (e.g. `active`).
>>> status = sc.feeds.status('active')
'''
self._check('feed_type', feed_type, str, choices=[
'active', 'passive', 'lce', 'sc', 'all'])
if not feed_type:
return self._api.get('feed').json()['response']
else:
return self._api.get('feed/{}'.format(feed_type)).json()['response']
def update(self, feed_type=None):
'''
Initiates an on-line feed update based on the specified feed_type. If
no feed type is specified, then it will default to initiating an update
for all feed types.
:sc-api:`feed: update <Feed.html#FeedRESTReference-FeedUpdatePOSTType>`
Args:
feed_type (str, optional):
The feed type to specifically update. Valid types are `active`,
`passive`, `lce`, `sc`, or `all`.
Returns:
:obj:`None`: Update successfully requested.
'''
self._api.post('feed/{}/update'.format(
self._check('feed_type', feed_type, str, default='all', choices=[
'active', 'passive', 'lce', 'sc', 'all'])), json={})
def process(self, feed_type, fobj):
'''
Initiates an off-line feed update based on the specified feed_type using
the file object passed as the update file.
:sc-api:`feed: process <Feed.html#FeedRESTReference-FeedUpdatePOSTProcess>`
Args:
feed_type (str):
The feed type to specifically process. Valid types are `active`,
`passive`, `lce`, or `sc`.
fobj (FileObject):
The file object to upload into SecurityCenter and use as the
update package.
Returns:
:obj:`None`:
Update successfully requested.
Examples:
updating the active plugins:
>>> with open('sc-plugins-diff.tar.gz', 'rb') as plugfile:
... sc.feeds.process('active', plugfile)
'''
filename = self._api.files.upload(fobj)
self._api.post('feed/{}/process'.format(
self._check('feed_type', feed_type, str, choices=[
'active', 'passive', 'lce', 'sc'])),
json={'filename': filename})
|
py | b4062f10e8182c683be25503aa98a45c80d0d306 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble.pymumble_py3 as pymumble
import variables as var
import logging
import logging.handlers
import traceback
from packaging import version
import util
import command
import constants
from database import SettingsDatabase, MusicDatabase
import media.system
from media.item import ValidationFailedError, PreparationFailedError
from media.playlist import BasePlaylist
from media.cache import MusicCache
class MumbleBot:
version = '6.1.1'
def __init__(self, args):
self.log = logging.getLogger("bot")
self.log.info("bot: botamusique version %s, starting..." % self.version)
signal.signal(signal.SIGINT, self.ctrl_caught)
self.cmd_handle = {}
self.volume_set = var.config.getfloat('bot', 'volume')
if var.db.has_option('bot', 'volume'):
self.volume_set = var.db.getfloat('bot', 'volume')
self.volume = self.volume_set
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel", fallback=None)
if args.verbose:
self.log.setLevel(logging.DEBUG)
self.log.debug("Starting in DEBUG loglevel")
elif args.quiet:
self.log.setLevel(logging.ERROR)
self.log.error("Starting in ERROR loglevel")
var.user = args.user
var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))
var.is_proxified = var.config.getboolean(
"webinterface", "is_web_proxified")
self.exit = False
self.nb_exit = 0
self.thread = None
self.thread_stderr = None
self.is_pause = False
self.pause_at_id = ""
self.playhead = -1
self.song_start_at = -1
self.last_ffmpeg_err = ""
self.read_pcm_size = 0
# self.download_threads = []
self.wait_for_ready = False # flag indicating the loop is waiting for a download to complete in another thread
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
tt = threading.Thread(
target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
tt.daemon = True
self.log.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
tt.start()
if var.config.getboolean("bot", "auto_check_update"):
th = threading.Thread(target=self.check_update, name="UpdateThread")
th.daemon = True
th.start()
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel")
if args.certificate:
certificate = args.certificate
else:
certificate = util.solve_filepath(var.config.get("server", "certificate"))
if args.tokens:
tokens = args.tokens
else:
tokens = var.config.get("server", "tokens")
tokens = tokens.split(',')
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens,
debug=var.config.getboolean('debug', 'mumbleConnection'), certfile=certificate)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
self.set_comment()
self.mumble.users.myself.unmute() # make sure the user is not muted
self.join_channel()
self.mumble.set_bandwidth(200000)
self.is_ducking = False
self.on_ducking = False
self.ducking_release = time.time()
if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False)\
or var.config.getboolean("bot", "ducking"):
self.is_ducking = True
self.ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.05)
self.ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=self.ducking_volume)
self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
self.ducking_sound_received)
self.mumble.set_receive_sound(True)
if var.config.get("bot", "when_nobody_in_channel") not in ['pause', 'pause_resume', 'stop', 'nothing']:
self.log.warning('Config "when_nobody_in_channel" is not one of "pause", "pause_resume", "stop" or "nothing", falling back to "nothing".')
if var.config.get("bot", "when_nobody_in_channel", fallback='nothing') in ['pause', 'pause_resume', 'stop']:
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, self.users_changed)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, self.users_changed)
# Debug use
self._loop_status = 'Idle'
self._display_rms = False
self._max_rms = 0
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
self.exit = True
self.pause()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
def check_update(self):
self.log.debug("update: checking for updates...")
new_version = util.new_release_version()
if version.parse(new_version) > version.parse(self.version):
self.log.info("update: new version %s found, current installed version %s." % (new_version, self.version))
self.send_channel_msg(constants.strings('new_version_found'))
else:
self.log.debug("update: no new version found.")
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel}
self.log.debug("bot: command added: " + command)
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
# =======================
# Message
# =======================
# All text send to the chat is analysed by this function
def message_received(self, text):
message = text.message.strip()
user = self.mumble.users[text.actor]['name']
if var.config.getboolean('commands', 'split_username_at_space'):
# in case you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes ,
# you may want to split the username
user = user.split()[0]
if message[0] in var.config.get('commands', 'command_symbol'):
# remove the symbol from the message
message = message[1:].split(' ', 1)
# use the first word as a command, the others one as parameters
if len(message) > 0:
command = message[0].lower()
parameter = ''
if len(message) > 1:
parameter = message[1].rstrip()
else:
return
self.log.info('bot: received command ' + command + ' - ' + parameter + ' by ' + user)
# Anti stupid guy function
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
self.mumble.users[text.actor].send_text_message(
constants.strings('pm_not_allowed'))
return
for i in var.db.items("user_ban"):
if user.lower() == i[0]:
self.mumble.users[text.actor].send_text_message(
constants.strings('user_ban'))
return
if not self.is_admin(user) and parameter:
input_url = util.get_url_from_input(parameter)
if input_url:
for i in var.db.items("url_ban"):
if input_url == i[0]:
self.mumble.users[text.actor].send_text_message(
constants.strings('url_ban'))
return
command_exc = ""
try:
if command in self.cmd_handle:
command_exc = command
if not self.cmd_handle[command]['access_outside_channel'] \
and not self.is_admin(user) \
and not var.config.getboolean('bot', 'allow_other_channel_message') \
and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
self.mumble.users[text.actor].send_text_message(
constants.strings('not_in_my_channel'))
return
self.cmd_handle[command]['handle'](self, user, text, command, parameter)
else:
# try partial match
cmds = self.cmd_handle.keys()
matches = []
for cmd in cmds:
if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
matches.append(cmd)
if len(matches) == 1:
self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
command_exc = matches[0]
if not self.cmd_handle[command_exc]['access_outside_channel'] \
and not self.is_admin(user) \
and not var.config.getboolean('bot', 'allow_other_channel_message') \
and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself[
'channel_id']:
self.mumble.users[text.actor].send_text_message(
constants.strings('not_in_my_channel'))
return
self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, parameter)
elif len(matches) > 1:
self.mumble.users[text.actor].send_text_message(
constants.strings('which_command', commands="<br>".join(matches)))
else:
self.mumble.users[text.actor].send_text_message(
constants.strings('bad_command', command=command))
except:
error_traceback = traceback.format_exc()
error = error_traceback.rstrip().split("\n")[-1]
self.log.error("bot: command %s failed with error: %s\n" % (command_exc, error_traceback))
self.send_msg(constants.strings('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
# text is the message object; it indicates whether this is a direct message or a channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
def is_admin(self, user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Users changed
# =======================
def users_changed(self, user, message):
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
# only check if there is one more user currently in the channel
# else when the music is paused and somebody joins, music would start playing again
if len(own_channel.get_users()) == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause":
self.send_channel_msg(constants.strings("auto_paused"))
elif len(own_channel.get_users()) == 1:
# if the bot is the only user left in the channel
self.log.info('bot: Other users in the channel left. Stopping music now.')
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.clear()
else:
self.pause()
# =======================
# Launch and Download
# =======================
def launch_music(self):
if var.playlist.is_empty():
return
assert self.wait_for_ready is False
music_wrapper = var.playlist.current_item()
uri = music_wrapper.uri()
self.log.info("bot: play music " + music_wrapper.format_debug_string())
if var.config.getboolean('bot', 'announce_current_music'):
self.send_channel_msg(music_wrapper.format_current_playing())
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
uri, '-ac', '1', '-f', 's16le', '-ar', '48000', '-')
self.log.debug("bot: execute ffmpeg command: " + " ".join(command))
# The ffmpeg process is a thread
# prepare pipe for catching stderr of ffmpeg
pipe_rd, pipe_wd = os.pipe()
util.pipe_no_wait(pipe_rd) # Let the pipe work in non-blocking mode
self.thread_stderr = os.fdopen(pipe_rd)
self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=480)
self.is_pause = False
self.read_pcm_size = 0
self.song_start_at = -1
self.playhead = 0
self.last_volume_cycle_time = time.time()
def async_download_next(self):
# Function start if the next music isn't ready
# Do nothing in case the next music is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item() and var.playlist.next_item().type in ['url', 'url_from_playlist']:
# usually, all validation will be done when adding to the list.
# however, for performance considerations, youtube playlist items won't be validated when added.
# the validation has to be done here.
next = var.playlist.next_item()
try:
next.validate()
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info("bot: start preparing item in thread: %s" % item.format_debug_string())
th.daemon = True
th.start()
return th
def _download(self, item):
ver = item.version
try:
item.prepare()
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
if item.version > ver:
var.playlist.version += 1
# =======================
# Loop
# =======================
# Main loop of the Bot
def loop(self):
raw_music = ""
while not self.exit and self.mumble.is_alive():
while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
# If the buffer isn't empty, I cannot send new music part, so I wait
self._loop_status = 'Wait for buffer %.3f' % self.mumble.sound_output.get_buffer_size()
time.sleep(0.01)
if self.thread:
# I get raw from ffmpeg thread
# move playhead forward
self._loop_status = 'Reading raw'
if self.song_start_at == -1:
self.song_start_at = time.time() - self.playhead
self.playhead = time.time() - self.song_start_at
raw_music = self.thread.stdout.read(480)
self.read_pcm_size += 480
try:
self.last_ffmpeg_err = self.thread_stderr.readline()
if self.last_ffmpeg_err:
self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n"))
except:
pass
if raw_music:
# Adjust the volume and send it to mumble
self.volume_cycle()
self.mumble.sound_output.add_sound(
audioop.mul(raw_music, 2, self.volume))
else:
time.sleep(0.1)
else:
time.sleep(0.1)
if not self.is_pause and (self.thread is None or self.thread.poll() is not None):
# ffmpeg thread has gone. indicate that last song has finished, or something is wrong.
if self.read_pcm_size < 481 and len(var.playlist) > 0 and var.playlist.current_index != -1 \
and self.last_ffmpeg_err:
current = var.playlist.current_item()
self.log.error("bot: cannot play music %s", current.format_debug_string())
self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err)
self.last_ffmpeg_err = ""
self.send_channel_msg(constants.strings('unable_play', item=current.format_title()))
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
# move to the next song.
if not self.wait_for_ready: # if wait_for_ready flag is not true, move to the next song.
if var.playlist.next():
current = var.playlist.current_item()
try:
current.validate()
if not current.is_ready():
self.log.info("bot: current music isn't ready, start downloading.")
self.async_download(current)
self.send_channel_msg(
constants.strings('download_in_progress', item=current.format_title()))
self.wait_for_ready = True
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
else:
self._loop_status = 'Empty queue'
else: # if wait_for_ready flag is true, means the pointer is already pointing to target song. start playing
current = var.playlist.current_item()
if current:
if current.is_ready():
self.wait_for_ready = False
self.launch_music()
self.async_download_next()
elif current.is_failed():
var.playlist.remove_by_id(current.id)
self.wait_for_ready = False
else:
self._loop_status = 'Wait for the next item to be ready'
else:
self.wait_for_ready = False
while self.mumble.sound_output.get_buffer_size() > 0:
# Empty the buffer before exit
time.sleep(0.01)
time.sleep(0.5)
if self.exit:
self._loop_status = "exited"
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
def volume_cycle(self):
delta = time.time() - self.last_volume_cycle_time
if self.on_ducking and self.ducking_release < time.time():
self.on_ducking = False
self._max_rms = 0
if delta > 0.001:
if self.is_ducking and self.on_ducking:
self.volume = (self.volume - self.ducking_volume) * math.exp(- delta / 0.2) + self.ducking_volume
else:
self.volume = self.volume_set - (self.volume_set - self.volume) * math.exp(- delta / 0.5)
self.last_volume_cycle_time = time.time()
def ducking_sound_received(self, user, sound):
rms = audioop.rms(sound.pcm, 2)
self._max_rms = max(rms, self._max_rms)
if self._display_rms:
if rms < self.ducking_threshold:
print('%6d/%6d ' % (rms, self._max_rms) + '-'*int(rms/200), end='\r')
else:
print('%6d/%6d ' % (rms, self._max_rms) + '-'*int(self.ducking_threshold/200)
+ '+'*int((rms - self.ducking_threshold)/200), end='\r')
if rms > self.ducking_threshold:
if self.on_ducking is False:
self.log.debug("bot: ducking triggered")
self.on_ducking = True
self.ducking_release = time.time() + 1 # ducking release after 1s
# =======================
# Play Control
# =======================
def clear(self):
# Kill the ffmpeg thread and empty the playlist
if self.thread:
self.thread.kill()
self.thread = None
var.playlist.clear()
self.wait_for_ready = False
self.log.info("bot: music stopped. playlist trashed.")
def stop(self):
self.interrupt()
self.is_pause = True
var.playlist.next()
self.wait_for_ready = True
self.log.info("bot: music stopped.")
def interrupt(self):
# Kill the ffmpeg thread
if self.thread:
self.thread.kill()
self.thread = None
self.song_start_at = -1
self.playhead = 0
def pause(self):
# Kill the ffmpeg thread
if self.thread:
self.pause_at_id = var.playlist.current_item().id
self.thread.kill()
self.thread = None
self.is_pause = True
self.song_start_at = -1
self.log.info("bot: music paused at %.2f seconds." % self.playhead)
def resume(self):
if var.playlist.current_index == -1:
var.playlist.next()
music_wrapper = var.playlist.current_item()
if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready():
self.is_pause = False
self.playhead = 0
return
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
self.log.info("bot: resume music at %.2f seconds" % self.playhead)
uri = music_wrapper.uri()
command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-ss', "%f" % self.playhead, '-i',
uri, '-ac', '1', '-f', 's16le', '-ar', '48000', '-')
if var.config.getboolean('bot', 'announce_current_music'):
self.send_channel_msg(var.playlist.current_item().format_current_playing())
self.log.info("bot: execute ffmpeg command: " + " ".join(command))
# The ffmpeg process is a thread
# prepare pipe for catching stderr of ffmpeg
pipe_rd, pipe_wd = os.pipe()
util.pipe_no_wait(pipe_rd) # Let the pipe work in non-blocking mode
self.thread_stderr = os.fdopen(pipe_rd)
self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=480)
self.last_volume_cycle_time = time.time()
self.pause_at_id = ""
self.is_pause = False
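# Hypothetical sketch of a custom command handler. Handlers receive
# (bot, user, text, command, parameter), mirroring the call made in
# message_received() above; the command name "hello" is an assumption.
def cmd_hello(bot, user, text, command, parameter):
    bot.send_msg("Hello %s!" % user, text)
# It would be registered after the bot is created, e.g.:
# var.bot.register_command("hello", cmd_hello)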
def start_web_interface(addr, port):
global formatter
import interface
# setup logger
werkzeug_logger = logging.getLogger('werkzeug')
logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile'))
if logfile:
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
else:
handler = logging.StreamHandler()
werkzeug_logger.addHandler(handler)
interface.init_proxy()
interface.web.env = 'development'
interface.web.run(port=port, host=addr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Bot for playing music on Mumble')
# General arguments
parser.add_argument("--config", dest='config', type=str, default='configuration.ini',
help='Load configuration from this file. Default: configuration.ini')
parser.add_argument("--db", dest='db', type=str,
default=None, help='database file. Default: database.db')
parser.add_argument("-q", "--quiet", dest="quiet",
action="store_true", help="Only Error logs")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Show debug log")
# Mumble arguments
parser.add_argument("-s", "--server", dest="host",
type=str, help="Hostname of the Mumble server")
parser.add_argument("-u", "--user", dest="user",
type=str, help="Username for the bot")
parser.add_argument("-P", "--password", dest="password",
type=str, help="Server password, if required")
parser.add_argument("-T", "--tokens", dest="tokens",
type=str, help="Server tokens, if required")
parser.add_argument("-p", "--port", dest="port",
type=int, help="Port for the Mumble server")
parser.add_argument("-c", "--channel", dest="channel",
type=str, help="Default channel for the bot")
parser.add_argument("-C", "--cert", dest="certificate",
type=str, default=None, help="Certificate file")
args = parser.parse_args()
config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)],
encoding='utf-8')
var.dbfile = args.db if args.db is not None else util.solve_filepath(
config.get("bot", "database_path", fallback="database.db"))
if len(parsed_configs) == 0:
logging.error('Could not read configuration from file \"{}\"'.format(args.config))
sys.exit()
var.config = config
var.db = SettingsDatabase(var.dbfile)
# Setup logger
bot_logger = logging.getLogger("bot")
formatter = logging.Formatter('[%(asctime)s %(levelname)s %(threadName)s] %(message)s', "%b %d %H:%M:%S")
bot_logger.setLevel(logging.INFO)
logfile = util.solve_filepath(var.config.get('bot', 'logfile'))
handler = None
if logfile:
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
bot_logger.addHandler(handler)
logging.getLogger("root").addHandler(handler)
var.bot_logger = bot_logger
if var.config.get("bot", "save_music_library", fallback=True):
var.music_db = MusicDatabase(var.dbfile)
else:
var.music_db = MusicDatabase(":memory:")
var.cache = MusicCache(var.music_db)
# load playback mode
playback_mode = None
if var.db.has_option("playlist", "playback_mode"):
playback_mode = var.db.get('playlist', 'playback_mode')
else:
playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot")
if playback_mode in ["one-shot", "repeat", "random", "autoplay"]:
var.playlist = media.playlist.get_playlist(playback_mode)
else:
raise KeyError("Unknown playback mode '%s'" % playback_mode)
var.bot = MumbleBot(args)
command.register_all_commands(var.bot)
if var.config.get("bot", "refresh_cache_on_startup", fallback=True):
var.cache.build_dir_cache()
# load playlist
if var.config.getboolean('bot', 'save_playlist', fallback=True):
var.bot_logger.info("bot: load playlist from previous session")
var.playlist.load()
# Start the main loop.
var.bot.loop()
|
py | b4062f2b08a776b19622ea8dd0ed8946d11ec080 | from __future__ import print_function
#---------------------------------------------------------------------
# This script demonstrates the usage of hotkeys.
#
# Note: Hotkeys only work with the GUI version of IDA and not in
# text mode.
#
# Author: Gergely Erdelyi <[email protected]>
#---------------------------------------------------------------------
import idaapi
def foo():
print("Hotkey activated!")
# IDA binds hotkeys to IDC functions so a trampoline IDC function
# must be created
idaapi.compile_idc_text('static key_2() { RunPythonStatement("foo()"); }')
# Add the hotkey
add_idc_hotkey("2", 'key_2')
# Press 2 to activate foo()
# The hotkey can be removed with
# del_idc_hotkey('2')
|
py | b406306039bc189d60828a39e6e78f9fbb56ec16 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = os.urandom(24)
SQLALCHEMY_DATABASE_URI = os.environ["DATABASE_URL"]
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
|
py | b40630dda0fc5091cdb46bae1be237fc3896760f | from abc import ABCMeta, abstractmethod
class IGetAuthTokenUseCase(metaclass=ABCMeta):
@abstractmethod
def execute(self):
raise NotImplementedError
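# Minimal sketch of a concrete implementation (illustrative only; the static
# token below is an assumption, a real implementation would fetch or refresh
# a token from the application's credential store).
class StaticAuthTokenUseCase(IGetAuthTokenUseCase):
    def __init__(self, token: str):
        self._token = token
    def execute(self):
        return self._token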
|
py | b40630e6de6973c1e3352515b15ef37bd232cfb3 | from itertools import repeat, product
import numpy as np
from scipy.optimize import fmin_slsqp
from scipy.special import expit
from girth.latent_ability_distribution import LatentPDF
from girth import (condition_polytomous_response,
validate_estimation_options)
from girth.polytomous_utils import _graded_partial_integral_md, _build_einsum_string
__all__ = ["multidimensional_ability_map", "multidimensional_ability_eap"]
def multidimensional_ability_map(dataset, difficulty, discrimination, options=None):
"""Estimates the abilities for dichotomous models.
Estimates the ability parameters (theta) for dichotomous models via
maximum a posteriori likelihood estimation.
Args:
dataset: [n_items, n_participants] (2d Array) of measured responses
difficulty: (1d Array) of difficulty parameters for each item
discrimination: (2d Array) of discrimination parameters for each item
options: dictionary with updates to default options
Returns:
abilities: (2d array) estimated abilities
Options:
* distribution: callable
"""
n_factors = discrimination.shape[1]
if n_factors < 2:
raise AssertionError("Number of factors specified must be greater than 1.")
options = validate_estimation_options(options)
cpr_result = condition_polytomous_response(dataset, trim_ends=False)
responses, item_counts, valid_response_mask = cpr_result
invalid_response_mask = ~valid_response_mask
n_items = responses.shape[0]
difficulty = difficulty.reshape(n_items, -1)
abilities = np.zeros((n_factors, dataset.shape[1]))
# Initialize difficulty parameter storage
betas = np.full((item_counts.sum(),), 10000.0)
cumulative_item_counts = item_counts.cumsum()
start_indices = np.roll(cumulative_item_counts, 1)
start_indices[0] = 0
# Initialize discrimination parameters storage
local_discrimination = np.zeros((betas.size, n_factors))
for ndx in range(n_items):
end_ndx = cumulative_item_counts[ndx]
start_ndx = start_indices[ndx]
betas[(start_ndx + 1):end_ndx] = difficulty[ndx][:item_counts[ndx] - 1]
local_discrimination[start_ndx:end_ndx, :] = discrimination[ndx]
betas_roll = np.roll(betas, -1)
betas_roll[cumulative_item_counts-1] = -10000.0
# Set invalid index to zero, this allows minimal
# changes for invalid data and it is corrected
# during integration
responses[invalid_response_mask] = 0
distribution = options['distribution']
for person_ndx in range(abilities.shape[1]):
person_response = responses[:, person_ndx]
invalid_person_mask = invalid_response_mask[:, person_ndx]
def _person_function(estimates):
kernel = (local_discrimination * estimates).sum(1)
temp1 = kernel + betas
temp2 = kernel + betas_roll
graded_prob = expit(temp1)
graded_prob -= expit(temp2)
# Set all the responses and fix afterward
temp_output = graded_prob[person_response]
temp_output[invalid_person_mask] = 1.0
return -(np.log(temp_output).sum() + np.log(distribution(estimates)).sum())
abilities[:, person_ndx] = fmin_slsqp(_person_function, np.zeros((n_factors)),
bounds=[(-4, 4),] * n_factors,
disp=False)
return abilities
def multidimensional_ability_eap(dataset, difficulty, discrimination, options=None):
"""Estimates the abilities for dichotomous models.
Estimates the ability parameters (theta) for dichotomous models via
expected a posteriori likelihood estimation.
Args:
dataset: [n_items, n_participants] (2d Array) of measured responses
difficulty: (1d Array) of difficulty parameters for each item
discrimination: (2d Array) of discrimination parameters for each item
options: dictionary with updates to default options
Returns:
abilities: (2d array) estimated abilities
Options:
* distribution: callable
* quadrature_bounds: (float, float)
* quadrature_n: int
"""
n_factors = discrimination.shape[1]
if n_factors < 2:
raise AssertionError("Number of factors specified must be greater than 1.")
options = validate_estimation_options(options)
cpr_result = condition_polytomous_response(dataset, trim_ends=False)
responses, item_counts, valid_response_mask = cpr_result
invalid_response_mask = ~valid_response_mask
n_items = responses.shape[0]
difficulty = difficulty.reshape(n_items, -1)
# Multi-dimensional Quadrature Locations
latent_pdf = LatentPDF(options)
theta = np.asarray(list(product(*repeat(latent_pdf.quadrature_locations,
n_factors)))).T
dist_x_weight = latent_pdf(None, 0)
einsum_string = _build_einsum_string(n_factors)
distribution_x_weight = np.einsum(einsum_string,
*repeat(dist_x_weight, n_factors)).flatten()
# Initialize difficulty parameter storage
betas = np.full((item_counts.sum(),), 10000.0)
cumulative_item_counts = item_counts.cumsum()
start_indices = np.roll(cumulative_item_counts, 1)
start_indices[0] = 0
# Initialize discrimination parameters storage
local_discrimination = np.zeros((betas.size, n_factors))
for ndx in range(n_items):
end_ndx = cumulative_item_counts[ndx]
start_ndx = start_indices[ndx]
betas[(start_ndx + 1):end_ndx] = difficulty[ndx][:item_counts[ndx] - 1]
local_discrimination[start_ndx:end_ndx, :] = discrimination[ndx]
betas_roll = np.roll(betas, -1)
betas_roll[cumulative_item_counts-1] = -10000.0
# Set invalid index to zero, this allows minimal
# changes for invalid data and it is corrected
# during integration
responses[invalid_response_mask] = 0
# Compute partial int for calculations
partial_int = np.ones((responses.shape[1], theta.shape[1]))
for item_ndx in range(n_items):
partial_int *= _graded_partial_integral_md(theta, betas, betas_roll,
local_discrimination,
responses[item_ndx],
invalid_response_mask[item_ndx])
# Loop over each dimension and compute
abilities = np.zeros((n_factors, dataset.shape[1]))
adjustment = ([latent_pdf.quadrature_locations]
+ [np.ones_like(latent_pdf.quadrature_locations),]
* (n_factors-1))
denominator = (partial_int * distribution_x_weight)
denominator_sum = denominator.sum(1)
for factor_ndx in range(n_factors):
linear_adjustment = np.einsum(einsum_string, *adjustment).flatten()
numerator = denominator * linear_adjustment
abilities[factor_ndx] = numerator.sum(1) / denominator_sum
adjustment.insert(0, adjustment.pop())
return abilities
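# Hypothetical calling sketch, restating the expected shapes from the
# docstrings above: dataset is [n_items, n_participants], difficulty holds one
# row of thresholds per item, and discrimination is [n_items, n_factors] with
# n_factors >= 2. The helper simply dispatches between the two estimators
# defined in this module.
def estimate_multidimensional_abilities(dataset, difficulty, discrimination,
                                        use_map=False, options=None):
    if use_map:
        return multidimensional_ability_map(dataset, difficulty,
                                            discrimination, options)
    return multidimensional_ability_eap(dataset, difficulty,
                                        discrimination, options)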
|
py | b406319c54d8705bf5e97b04125fa2c94c40f238 | r"""
Implements a buffer with insertion points. When you know you need to
"get back" to a place and write more later, simply call insertion_point()
at that spot and get a new StringIOTree object that is "left behind".
EXAMPLE:
>>> a = StringIOTree()
>>> _= a.write('first\n')
>>> b = a.insertion_point()
>>> _= a.write('third\n')
>>> _= b.write('second\n')
>>> a.getvalue().split()
['first', 'second', 'third']
>>> c = b.insertion_point()
>>> d = c.insertion_point()
>>> _= d.write('alpha\n')
>>> _= b.write('gamma\n')
>>> _= c.write('beta\n')
>>> b.getvalue().split()
['second', 'alpha', 'beta', 'gamma']
>>> i = StringIOTree()
>>> d.insert(i)
>>> _= i.write('inserted\n')
>>> out = StringIO()
>>> a.copyto(out)
>>> out.getvalue().split()
['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
"""
from __future__ import absolute_import #, unicode_literals
try:
# Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2.
from cStringIO import StringIO
except ImportError:
from io import StringIO
class StringIOTree(object):
"""
See module docs.
"""
def __init__(self, stream=None):
self.prepended_children = []
if stream is None:
stream = StringIO()
self.stream = stream
self.write = stream.write
self.markers = []
def getvalue(self):
content = [x.getvalue() for x in self.prepended_children]
content.append(self.stream.getvalue())
return "".join(content)
def copyto(self, target):
"""Potentially cheaper than getvalue as no string concatenation
needs to happen."""
for child in self.prepended_children:
child.copyto(target)
stream_content = self.stream.getvalue()
if stream_content:
target.write(stream_content)
def commit(self):
# Save what we have written until now so that the buffer
# itself is empty -- this makes it ready for insertion
if self.stream.tell():
self.prepended_children.append(StringIOTree(self.stream))
self.prepended_children[-1].markers = self.markers
self.markers = []
self.stream = StringIO()
self.write = self.stream.write
def insert(self, iotree):
"""
Insert a StringIOTree (and all of its contents) at this location.
Further writing to self appears after what is inserted.
"""
self.commit()
self.prepended_children.append(iotree)
def insertion_point(self):
"""
Returns a new StringIOTree, which is left behind at the current position
(i.e. what is written to the result will appear right before whatever is
next written to self).
Calling getvalue() or copyto() on the result will only return the
contents written to it.
"""
# Save what we have written until now
# This is so that getvalue on the result doesn't include it.
self.commit()
# Construct the new forked object to return
other = StringIOTree()
self.prepended_children.append(other)
return other
def allmarkers(self):
children = self.prepended_children
return [m for c in children for m in c.allmarkers()] + self.markers
|
py | b406321ed9f5a3ef248fd20ed16985b748b932de | #!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
from datetime import timedelta
import os
import time
import pytz
from dotenv import load_dotenv
load_dotenv(verbose=True)
#---------------------------#
# APPLICATION CONFIGURATION #
#---------------------------#
"""
Variable controls what timezone formatting we must apply to our UTC datetimes.
"""
LOCAL_TIMEZONE = pytz.timezone(os.getenv("LOCAL_TIMEZONE_NAME"))
"""
Variable controls where to save the `wpa_supplicant_conf` file to.
"""
WPA_SUPPLICANT_CONF = os.getenv("WPA_SUPPLICANT_CONF")
SUDO_MODE = os.getenv("SUDO_MODE")
#-------------------#
# UTILITY FUNCTIONS #
#-------------------#
def getDT():
"""
Function will return the current datetime aware of the local timezone.
"""
# Get our UTC datetime without any timezone awareness.
naive_now_dt = datetime.datetime.utcnow()
# Convert to our local timezone.
utc_aware_now_dt = naive_now_dt.replace(tzinfo=pytz.utc) # Make UTC timezone aware.
local_aware_now_dt = utc_aware_now_dt.astimezone(LOCAL_TIMEZONE) # Convert to local timezone.
# Return our datetime converted to our local timezone.
return local_aware_now_dt
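# Hypothetical usage sketch, kept behind a __main__ guard: prints the current
# datetime converted to the timezone named by LOCAL_TIMEZONE_NAME.
if __name__ == "__main__":
    print(getDT().isoformat())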
|
py | b406333074ec1264e3e0c32523b9c9b02086f457 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
from loguru import logger as logger_
logger = logger_
"""
:Description:
    Logger object.
:Defaults:
    * Format: ``[%(asctime)s %(name)s] %(levelname)s: %(message)s``
    * Level: ``DEBUG`` / ``INFO``, depending on the config
    * Output: written to stdout
:Usage:
.. code-block:: python
from epicteller.log import logger
"""
# default_handler = logging.StreamHandler(sys.stdout)
# default_handler.setFormatter(
# logging.Formatter("[%(asctime)s %(name)s] %(levelname)s: %(message)s"))
# logger.addHandler(default_handler)
class Filter:
def __init__(self) -> None:
self.level = "DEBUG"
def __call__(self, record):
record["name"] = record["name"].split(".")[0]
levelno = logger.level(self.level).no
return record["level"].no >= levelno
class LoguruHandler(logging.Handler):
def emit(self, record):
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth,
exception=record.exc_info).log(level, record.getMessage())
logger.remove()
default_filter = Filter()
default_format = (
"<g>{time:MM-DD HH:mm:ss}</g> "
"[<lvl>{level}</lvl>] "
"<c><u>{name}</u></c> | "
# "<c>{function}:{line}</c>| "
"{message}")
logger.add(sys.stdout,
colorize=True,
diagnose=False,
filter=default_filter,
format=default_format)
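# Hypothetical wiring sketch: LoguruHandler bridges records from the standard
# logging module into loguru, so installing it on the root logger (as below)
# routes third-party library logs through the same sink configured above.
if __name__ == "__main__":
    logging.basicConfig(handlers=[LoguruHandler()], level=logging.DEBUG)
    logging.getLogger("epicteller.demo").info("routed through loguru")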
|