ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a484d2310d0a815e25f197a93e44f6710f7319f | """
disk_dict.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import cPickle
from w3af.core.data.misc.cpickle_dumps import cpickle_dumps
from w3af.core.data.fuzzer.utils import rand_alpha
from w3af.core.data.db.dbms import get_default_temp_db_instance
class DiskDict(object):
"""
It's a dict that stores items in a sqlite3 database and has the following
features:
- Dict-like API
- Is thread safe
- Deletes the table when the instance object is deleted
:author: Andres Riancho ([email protected])
"""
def __init__(self, table_prefix=None):
self.db = get_default_temp_db_instance()
prefix = '' if table_prefix is None else ('%s_' % table_prefix)
self.table_name = 'disk_dict_' + prefix + rand_alpha(30)
# Create table
# DO NOT add the AUTOINCREMENT flag to the table creation since that
# will break __getitem__ when an item is removed, see:
# http://www.sqlite.org/faq.html#q1
columns = [('index_', 'INTEGER'),
('key', 'BLOB'),
('value', 'BLOB')]
pks = ['index_']
self.db.create_table(self.table_name, columns, pks)
self.db.create_index(self.table_name, ['key'])
self.db.commit()
def cleanup(self):
self.db.drop_table(self.table_name)
def keys(self):
pickled_keys = self.db.select('SELECT key FROM %s' % self.table_name)
result_list = []
for r in pickled_keys:
result_list.append(cPickle.loads(r[0]))
return result_list
def iterkeys(self):
pickled_keys = self.db.select('SELECT key FROM %s' % self.table_name)
for r in pickled_keys:
yield cPickle.loads(r[0])
def iteritems(self):
pickled_keys = self.db.select('SELECT key, value FROM %s' % self.table_name)
for r in pickled_keys:
yield cPickle.loads(r[0]), cPickle.loads(r[1])
def __contains__(self, key):
"""
:return: True if the value is in keys
"""
# Adding the "limit 1" to the query makes it faster, as it won't
# have to scan through all the table/index, it just stops on the
# first match.
query = 'SELECT count(*) FROM %s WHERE key=? limit 1' % self.table_name
r = self.db.select_one(query, (cpickle_dumps(key),))
return bool(r[0])
def __delitem__(self, key):
"""
Delete the key from the dict
:param key: The key to delete
:return: None
"""
query = 'DELETE FROM %s WHERE key = ?' % self.table_name
self.db.execute(query, (cpickle_dumps(key),))
def __setitem__(self, key, value):
# Test if it is already in the DB:
if key in self:
query = 'UPDATE %s SET value = ? WHERE key=?' % self.table_name
self.db.execute(query, (cpickle_dumps(value),
cpickle_dumps(key)))
else:
query = "INSERT INTO %s VALUES (NULL, ?, ?)" % self.table_name
self.db.execute(query, (cpickle_dumps(key),
cpickle_dumps(value)))
def __getitem__(self, key):
query = 'SELECT value FROM %s WHERE key=? limit 1' % self.table_name
r = self.db.select(query, (cpickle_dumps(key),))
if not r:
args = (key, self.table_name)
raise KeyError('%s not in %s.' % args)
return cPickle.loads(r[0][0])
def __len__(self):
query = 'SELECT count(*) FROM %s' % self.table_name
r = self.db.select_one(query)
return r[0]
def get(self, key, default=-456):
try:
return self[key]
except KeyError:
if default is not -456:
return default
raise KeyError()
def pop(self, key, default=-456):
value = self.get(key, default=default)
del self[key]
return value
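# Usage sketch (illustrative only, not part of the original module): a minimal
# example of the dict-like API described in the class docstring, assuming the
# default temporary DB returned by get_default_temp_db_instance() is available.
#
#     dd = DiskDict(table_prefix='example')
#     dd['spam'] = 'eggs'            # __setitem__ stores pickled key/value rows
#     assert 'spam' in dd            # __contains__ runs a SELECT ... limit 1
#     print(dd['spam'], len(dd))     # __getitem__ / __len__
#     dd.cleanup()                   # drops the backing sqlite3 table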
|
py | 1a484d540e55931951fc16f8714ba8678edb4736 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that suppors using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
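# Usage sketch (illustrative only, not part of the original app code): with this
# model configured as AUTH_USER_MODEL, accounts are created by email rather than
# username, e.g. from a test or a Django shell:
#
#     from django.contrib.auth import get_user_model
#     user = get_user_model().objects.create_user(
#         email='[email protected]', password='testpass123', name='Test')
#     admin = get_user_model().objects.create_superuser(
#         email='[email protected]', password='testpass123')
#     assert admin.is_staff and admin.is_superuser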
|
py | 1a484db62ace1ec5577f68686456cd0c4f2be679 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RZoo(RPackage):
"""S3 Infrastructure for Regular and Irregular Time Series (Z's Ordered
Observations).
An S3 class with methods for totally ordered indexed observations. It is
particularly aimed at irregular time series of numeric vectors/matrices and
factors. zoo's key design goals are independence of a particular
index/date/time class and consistency with ts and base R by providing
methods to extend standard generics."""
cran = "zoo"
version('1.8-9', sha256='b7be259067a8b9d4a8f5d387e0946a5ba1eb43474baa67ccf4f8bf4b15f772a3')
version('1.8-8', sha256='4e8cc4065047ba12e103b9664f3b607c770673096e9c2b694fad2b2ec3203ce7')
version('1.8-6', sha256='2217a4f362f2201443b5fdbfd9a77d9a6caeecb05f02d703ee8b3b9bf2af37cc')
version('1.8-5', sha256='8773969973d28d7d1a48f74b73be1dbd97acb3b22a4668a102e8bb585a7de826')
version('1.7-14', sha256='4858675fed056a4329c4998517cc944db386447483390bd342de719e0509f598')
version('1.7-13', sha256='0ca5264d6077c785963705e462aec3e57e0d0651379f9bf4ee32e4f3b25dc754')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@1.8-2:')
depends_on('[email protected]:', type=('build', 'run'))
|
py | 1a484e3839e5be5e2d36e8dd5a4a222f5dcb22ea | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""ploting tools."""
import numpy as np
import nibabel as nb
import pandas as pd
from nilearn.signal import clean
import matplotlib.pyplot as plt
from matplotlib import gridspec as mgs
import seaborn as sns
from niworkflows.viz.plots import plot_carpet as plot_carpetX
from ..utils import read_ndata
def plotimage(img,out_file):
fig = plt.figure(constrained_layout=False, figsize=(30, 15))
from nilearn.plotting import plot_anat
plot_anat(img,draw_cross=False,figure=fig)
fig.savefig(out_file,bbox_inches="tight", pad_inches=None)
return out_file
def plot_svg(fdata,fd,dvars,filename,tr=1):
'''
plot carpetplot with fd and dvars
------------
fdata:
4D ndarray
fd:
framewise displacement
dvars:
dvars
filename
filename
tr:
        repetition time
'''
fig = plt.figure(constrained_layout=False, figsize=(30, 15))
grid = mgs.GridSpec(3, 1, wspace=0.0, hspace=0.05,
height_ratios=[1] * (3 - 1) + [5])
confoundplot(fd, grid[0], tr=tr, color='b', name='FD')
confoundplot(dvars, grid[1], tr=tr, color='r', name='DVARS')
plot_carpet(func_data=fdata,subplot=grid[-1], tr=tr,)
fig.savefig(filename,bbox_inches="tight", pad_inches=None)
def compute_dvars(datat):
'''
compute standard dvars
    datat : numpy ndarray
        data matrix, vertices by timepoints
'''
firstcolumn=np.zeros((datat.shape[0]))[...,None]
datax=np.hstack((firstcolumn,np.diff(datat)))
datax_ss=np.sum(np.square(datax),axis=0)/datat.shape[0]
return np.sqrt(datax_ss)
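# Worked example (illustrative only, not part of the original module): for a
# 2-voxel, 3-timepoint matrix the first DVARS value is 0 by construction and the
# remaining values are the root mean square of the backward temporal differences.
#
#     datat = np.array([[1.0, 2.0, 4.0],
#                       [1.0, 1.0, 3.0]])
#     # padded diffs: [[0, 1, 2], [0, 0, 2]]
#     # sum of squares per timepoint / n_voxels: [0.0, 0.5, 4.0]
#     compute_dvars(datat)   # -> array([0.        , 0.70710678, 2.        ])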
def plot_carpet(func_data,detrend=True, nskip=0, size=(950, 800),
subplot=None, title=None, output_file=None, legend=False,
tr=None):
"""
    Plot an image representation of voxel intensities across time also known
as the "carpet plot"
from Niworkflows
Parameters
----------
func_data :
4D ndarray
detrend : boolean, optional
Detrend and standardize the data prior to plotting.
nskip : int
Number of volumes at the beginning of the scan marked to be excluded.
title : string, optional
The title displayed on the figure.
output_file : string, or None, optional
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
legend : bool
Whether to render the average functional series with ``atlaslabels`` as
overlay.
tr : float , optional
Specify the TR, if specified it uses this value. If left as None,
# Frames is plotted instead of time.
"""
# Define TR and number of frames
notr = False
if tr is None:
notr = True
tr = 1
ntsteps = func_data.shape[-1]
data = func_data.reshape(-1, ntsteps)
p_dec = 1 + data.shape[0] // size[0]
if p_dec:
data = data[::p_dec, :]
t_dec = 1 + data.shape[1] // size[1]
if t_dec:
data = data[:, ::t_dec]
# Detrend data
v = (None, None)
if detrend:
data = clean(data.T, t_r=tr).T
v = (-2, 2)
# If subplot is not defined
if subplot is None:
subplot = mgs.GridSpec(1, 1)[0]
# Define nested GridSpec
wratios = [1, 100, 20]
gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
width_ratios=wratios[:2 + int(legend)],
wspace=0.0)
# Carpet plot
ax1 = plt.subplot(gs[1])
ax1.imshow(data, interpolation='nearest', aspect='auto', cmap='gray',
vmin=v[0], vmax=v[1])
ax1.grid(False)
ax1.set_yticks([])
ax1.set_yticklabels([])
# Set 10 frame markers in X axis
interval = max((int(data.shape[-1] + 1) //
10, int(data.shape[-1] + 1) // 5, 1))
xticks = list(range(0, data.shape[-1])[::interval])
ax1.set_xticks(xticks)
if notr:
ax1.set_xlabel('time (frame #)')
else:
ax1.set_xlabel('time (s)')
labels = tr * (np.array(xticks)) * t_dec
ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=10)
# Remove and redefine spines
for side in ["top", "right"]:
ax1.spines[side].set_color('none')
ax1.spines[side].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_color('none')
ax1.spines["left"].set_visible(False)
if output_file is not None:
figure = plt.gcf()
figure.savefig(output_file, bbox_inches='tight')
plt.close(figure)
figure = None
return output_file
return [ax1], gs
def confoundplot(tseries, gs_ts, gs_dist=None, name=None,
units=None, tr=None, hide_x=True, color='b', nskip=0,
cutoff=None, ylims=None):
'''
adapted from niworkflows
tseries:
numpy array
gs_ts:
GridSpec
name:
file name
units:
tseries unit
tr:
repetition time
'''
# Define TR and number of frames
notr = False
if tr is None:
notr = True
tr = 1.
ntsteps = len(tseries)
tseries = np.array(tseries)
# Define nested GridSpec
gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts,
width_ratios=[1, 100], wspace=0.0)
ax_ts = plt.subplot(gs[1])
ax_ts.grid(False)
# Set 10 frame markers in X axis
interval = max((ntsteps // 10, ntsteps // 5, 1))
xticks = list(range(0, ntsteps)[::interval])
ax_ts.set_xticks(xticks)
if not hide_x:
if notr:
ax_ts.set_xlabel('time (frame #)')
else:
ax_ts.set_xlabel('time (s)')
labels = tr * np.array(xticks)
ax_ts.set_xticklabels(['%.02f' % t for t in labels.tolist()])
else:
ax_ts.set_xticklabels([])
if name is not None:
if units is not None:
name += ' [%s]' % units
ax_ts.annotate(
name, xy=(0.0, 0.7), xytext=(0, 0), xycoords='axes fraction',
textcoords='offset points', va='center', ha='left',
color=color, size=20,
bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none',
'color': 'none', 'lw': 0, 'alpha': 0.8})
for side in ["top", "right"]:
ax_ts.spines[side].set_color('none')
ax_ts.spines[side].set_visible(False)
if not hide_x:
ax_ts.spines["bottom"].set_position(('outward', 20))
ax_ts.xaxis.set_ticks_position('bottom')
else:
ax_ts.spines["bottom"].set_color('none')
ax_ts.spines["bottom"].set_visible(False)
# ax_ts.spines["left"].set_position(('outward', 30))
ax_ts.spines["left"].set_color('none')
ax_ts.spines["left"].set_visible(False)
# ax_ts.yaxis.set_ticks_position('left')
ax_ts.set_yticks([])
ax_ts.set_yticklabels([])
nonnan = tseries[~np.isnan(tseries)]
if nonnan.size > 0:
# Calculate Y limits
valrange = (nonnan.max() - nonnan.min())
def_ylims = [nonnan.min() - 0.1 * valrange,
nonnan.max() + 0.1 * valrange]
if ylims is not None:
if ylims[0] is not None:
def_ylims[0] = min([def_ylims[0], ylims[0]])
if ylims[1] is not None:
def_ylims[1] = max([def_ylims[1], ylims[1]])
# Add space for plot title and mean/SD annotation
def_ylims[0] -= 0.1 * (def_ylims[1] - def_ylims[0])
ax_ts.set_ylim(def_ylims)
# Annotate stats
maxv = nonnan.max()
mean = nonnan.mean()
stdv = nonnan.std()
p95 = np.percentile(nonnan, 95.0)
else:
maxv = 0
mean = 0
stdv = 0
p95 = 0
stats_label = (r'max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} '
r'$\bullet$ $\sigma$: {sigma:.3f}').format(
max=maxv, mean=mean, units=units or '', sigma=stdv)
ax_ts.annotate(
stats_label, xy=(0.98, 0.7), xycoords='axes fraction',
xytext=(0, 0), textcoords='offset points',
va='center', ha='right', color=color, size=10,
bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none', 'color': 'none',
'lw': 0, 'alpha': 0.8}
)
# Annotate percentile 95
ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=.1, color='lightgray')
ax_ts.annotate(
'%.2f' % p95, xy=(0, p95), xytext=(-1, 0),
textcoords='offset points', va='center', ha='right',
color='lightgray', size=3)
if cutoff is None:
cutoff = []
for thr in enumerate(cutoff):
ax_ts.plot((0, ntsteps - 1), [thr] * 2,
linewidth=.2, color='dimgray')
ax_ts.annotate(
'%.2f' % thr, xy=(0, thr), xytext=(-1, 0),
textcoords='offset points', va='center', ha='right',
color='dimgray', size=3)
ax_ts.plot(tseries, color=color, linewidth=1.5)
ax_ts.set_xlim((0, ntsteps - 1))
if gs_dist is not None:
ax_dist = plt.subplot(gs_dist)
sns.distplot(tseries, vertical=True, ax=ax_dist)
ax_dist.set_xlabel('Timesteps')
ax_dist.set_ylim(ax_ts.get_ylim())
ax_dist.set_yticklabels([])
return [ax_ts, ax_dist], gs
return ax_ts, gs
# for executive summmary report
# Azeez Adebimpe, 2021
def plotseries(conf,gs_ts,ylim=None,ylabelx=None,hide_x=None,tr=None,ax=None):
    columns = conf.columns
notr = False
if tr is None:
notr = True
tr = 1.
xtick = np.linspace(0,conf.shape[0]*tr,num=conf.shape[0])
plt.style.use('seaborn-white')
plt.xticks(color='k')
plt.yticks(color='k')
gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts,
width_ratios=[1, 100], wspace=0.0)
ax= plt.subplot(gs[1])
ax.grid(False)
    for k in columns:
ax.plot(xtick,conf[k],label=k,linewidth=2)
if ylim:
ax.set_ylim(ylim)
else:
ax.set_ylim([-2*conf[k].max(),2*conf[k].max()])
ax.set_ylabel(ylabelx,fontsize=20)
ax.legend(fontsize=20)
last = conf.shape[0] - 1
interval = max((last // 10, last // 5, 1))
ax.set_xlim(0, last)
if not hide_x:
xticks = list(range(0, last)[::interval])
else:
xticks = []
ax.set_xticks(xticks)
if not hide_x:
        if notr:
ax.set_xlabel("time (frame #)")
else:
ax.set_xlabel("time (s)")
ax.set_xticklabels(["%.01f" % t for t in (tr * np.array(xticks)).tolist()])
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(2)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(20)
return ax
def plot_svgx(rawdata,regdata,resddata,fd,filenamebf,filenameaf,mask=None,seg=None,tr=1):
'''
generate carpet plot with dvars, fd, and WB
------------
rawdata:
nifti or cifti
regdata:
        nifti or cifti after nuisance regression
resddata:
nifti or cifti after regression and filtering
mask:
mask for nifti if available
seg:
3 tissues seg files
tr:
repetition times
fd:
framewise displacement
filenamebf:
output file svg before processing
filenameaf:
output file svg after processing
'''
rxdata = compute_dvars(read_ndata(datafile=rawdata,maskfile=mask))
rgdata = compute_dvars(read_ndata(datafile=regdata,maskfile=mask))
rsdata = compute_dvars(read_ndata(datafile=resddata,maskfile=mask))
conf = pd.DataFrame({'Pre reg': rxdata, 'Post reg': rgdata, 'Post all': rsdata})
fdx = pd.DataFrame({'FD':np.loadtxt(fd)})
rw = read_ndata(datafile=rawdata,maskfile=mask)
rs = read_ndata(datafile=resddata,maskfile=mask)
wbbf = pd.DataFrame({'Mean':np.nanmean(rw,axis=0),'Std':np.nanstd(rw,axis=0)})
wbaf = pd.DataFrame({'Mean':np.nanmean(rs,axis=0),'Std':np.nanstd(rs,axis=0)})
if seg is not None:
atlaslabels = nb.load(seg).get_fdata()
else:
atlaslabels = None
#
plt.cla()
plt.clf()
figx = plt.figure(constrained_layout=True, figsize=(45,60))
grid = mgs.GridSpec(4, 1, wspace=0.0, hspace=0.05,height_ratios=[1,1,2.5,1])
confoundplotx(tseries=conf,gs_ts=grid[0],tr=tr,ylabel='DVARS',hide_x=True)
confoundplotx(tseries=wbbf,gs_ts=grid[1],tr=tr,hide_x=True,ylabel='WB')
plot_carpetX(func=rawdata,atlaslabels=atlaslabels,tr=tr,subplot=grid[2],legend=True,title='Raw')
confoundplotx(tseries=fdx,gs_ts=grid[3],tr=tr,hide_x=False,ylims=[0,1],ylabel='FD[mm]')
figx.savefig(filenamebf,bbox_inches="tight", pad_inches=None,dpi=300)
plt.cla()
plt.clf()
figy = plt.figure(constrained_layout=True, figsize=(45,60))
grid = mgs.GridSpec(4, 1, wspace=0.0, hspace=0.05,height_ratios=[1,1,2.5,1])
confoundplotx(tseries=conf,gs_ts=grid[0],tr=tr,ylabel='DVARS',hide_x=True)
confoundplotx(tseries=wbaf,gs_ts=grid[1],tr=tr,hide_x=True,ylabel='WB')
plot_carpetX(func=resddata,atlaslabels=atlaslabels,tr=tr,subplot=grid[2],legend=True,title='Processed')
confoundplotx(tseries=fdx,gs_ts=grid[3],tr=tr,hide_x=False,ylims=[0,1],ylabel='FD[mm]')
figy.savefig(filenameaf,bbox_inches="tight", pad_inches=None,dpi=300)
return filenamebf,filenameaf
def confoundplotx(
tseries,
gs_ts,
tr=None,
hide_x=True,
ylims=None,
ylabel=None
):
import seaborn as sns
# Define TR and number of frames
notr = False
if tr is None:
notr = True
tr = 1.0
ntsteps = tseries.shape[0]
#tseries = np.array(tseries)
# Define nested GridSpec
gs = mgs.GridSpecFromSubplotSpec(
1, 2, subplot_spec=gs_ts, width_ratios=[1, 100], wspace=0.0
)
ax_ts = plt.subplot(gs[1])
ax_ts.grid(False)
# Set 10 frame markers in X axis
interval = max((ntsteps // 10, ntsteps // 5, 1))
xticks = list(range(0, ntsteps)[::interval])
ax_ts.set_xticks(xticks)
if not hide_x:
if notr:
ax_ts.set_xlabel("Time (frame #)")
else:
ax_ts.set_xlabel("Time (s)")
labels = tr * np.array(xticks)
ax_ts.set_xticklabels(["%.01f" % t for t in labels.tolist()])
else:
ax_ts.set_xticklabels([])
if ylabel:
ax_ts.set_ylabel(ylabel)
columns= tseries.columns
maxim_value =[]
minim_value =[]
for c in columns:
ax_ts.plot(tseries[c],label=c, linewidth=3)
maxim_value.append(max(tseries[c]))
minim_value.append(min(tseries[c]))
minx_value = [abs(x) for x in minim_value]
ax_ts.set_xlim((0, ntsteps - 1))
ax_ts.legend(fontsize=30)
if ylims:
ax_ts.set_ylim(ylims)
else:
ax_ts.set_ylim([-1.5*max(minx_value),1.5*max(maxim_value)])
for item in ([ax_ts.title, ax_ts.xaxis.label, ax_ts.yaxis.label] +
ax_ts.get_xticklabels() + ax_ts.get_yticklabels()):
item.set_fontsize(30)
return ax_ts, gs
|
py | 1a485023853d3c92991f688ef7a4c81e102a328a | def load_config(fname):
config_dict = {}
with open(fname, 'r') as f:
for line in f:
            if not line.strip() or line.startswith('#'):
continue
(key, val) = line.split()[:2]
try:
val = eval(val)
            except (NameError, SyntaxError):
pass
config_dict[key] = val
return config_dict
def save_config(config, fname):
with open(fname, 'w') as f:
for key, value in config.items():
f.write("%s\t%s\n" % (key, value))
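# Illustrative round trip (not part of the original file), assuming a config file
# of whitespace-separated "key value" pairs with '#' comment lines:
#
#     # example.cfg contents:
#     #   learning_rate   0.01
#     #   use_gpu         True
#     #   run_name        baseline
#
#     cfg = load_config('example.cfg')
#     # -> {'learning_rate': 0.01, 'use_gpu': True, 'run_name': 'baseline'}
#     save_config(cfg, 'example_copy.cfg')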
|
py | 1a4852cfe35133d0f65c363ce430f06346ed7df5 | import setuptools
from setuptools import setup
# reading long description from file
with open("README.md", "r") as fh:
long_description = fh.read()
# specify requirements of your package here
REQUIREMENTS = []
# some more details
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
# calling the setup function
setup(name='dirman',
version='1.0.1',
description='extract dir given its root path',
long_description_content_type="text/markdown",
long_description=long_description,
url='https://github.com/sonusharma07/dirman',
author='sonu sharma',
author_email='[email protected]',
license='MIT',
packages=setuptools.find_packages(),
classifiers=CLASSIFIERS,
install_requires=REQUIREMENTS,
keywords='os path dir'
)
|
py | 1a4852e687f0a1a7579ceb887d4f526d87695478 | #from YourClassParentDir.YourClass import YourClass
from math import floor
import cv2
import numpy as np
import pandas as pd
#import multiprocessing as mp
from video_process.tracktor import Tracktor
class VideoCapture:
"""
VideoCapture is a class that takes a video, processes it, and returns it.
    This means that VideoCapture is responsible for working with the data (Tracktor):
    managing the data, and adding and removing tracktor objects from the video, as well as
    retrieving and exporting data.
    It is also responsible for video-related functions such as play and pause.
Parameters
----------
video_source: string
        This is the path of the video that is to be processed.
"""
def __init__(self, video_source=""):
# Open the video source
self.cap = cv2.VideoCapture(video_source)
if not self.cap.isOpened():
raise ValueError("Unable to open video source", video_source)
#print(cv2.getBuildInformation())
#print(cv2.ocl.haveOpenCL())
#cv2.ocl.setUseOpenCL(True)
# Get video source width, height (resolution) and video length in frames
self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.length = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
self.FPS = 60
#set the video framerate
self.cap.set(cv2.CAP_PROP_FPS, self.FPS)
#current frame is used to know what frame it is, as well as assigning frames
self.current_frame = 10
self.last_frame = self.current_frame
#playstate is used to play/pause the video
self.play_state = False
#working number is the index for which tracktor to process
self.working_number = 0
self.trackers = []
#a list of tuples with position to track and frame to assign on.
self.track_history = []
#List of gps coordinates, possible triangulation
self.gps_coord = []
#Ground Sample Distance variables
self.cam_distance = 0
self.cam_focal_length = 0
self.cam_sensor_width = 0
self.cam_sensor_height = 0
#the path to export the data
self.output_path = "./output/"
#tracking constants for getting frame types
self.TRACK_ALL = -1
self.NO_TRACKING = -2
#zoom variable for setting focused frame
self.zoom = 1
def draw_gps(self):
"""
Draws the GPS coordinated onto the current frame.
If 1 point, draw a circle
If 2 points, draw a line and calculate distance
        If 3 or more points, draw a polygon and calculate the distance of all the edges
"""
pass
def calculate_location(self, pos_x, pos_y):
"""
Calculates GPS location of a point passed in.
Based on the GPS points, the location will calculate distance and direction
to find the location.
"""
pass
def calculate_size(self, tracktor):
"""
Based on calculated distance of the GPS coordinates, size of the object
is calculated.
This should be the distance between the two farthest points (extreme points)
Based on pixel length, calculate the related length.
"""
pass
def create_tracker_pos(self, pos_x, pos_y):
"""
This function creates a new coordinate in history according to current frame
"""
if self.working_number >= 0:
location = (pos_x, pos_y, self.working_number, self.current_frame)
self.track_history.append(location)
print("Adding clicked location to", end="")
print(self.track_history[-1])
def delete_tracker_pos(self, frame_number):
"""
This function removes an assignment on a given frame
"""
def set_tracker_pos(self, tracktor):
"""
This function sets the tracker position at a given frame
"""
for i in range(len(self.track_history)):
#if frame number is equal to set frame ex: (x,y,working_number,frame)
if self.current_frame == self.track_history[i][3]:
tracktor_index = self.find_tracker_index_by_id(tracktor.id)
#if the saved tracktor in the list matches the saved working_number
if tracktor_index == self.track_history[i][2]:
#assign that tracktor's clicked to the saved coordinates(x,y)
self.trackers[tracktor_index].clicked = (self.track_history[i][0],
self.track_history[i][1])
# print("Assigning point from history at:", end="")
# print(self.track_history[i])
def play(self):
"""
Sets the play_state of the video to play, if not already.
"""
if self.play_state is False:
self.play_state = True
def pause(self):
"""
Sets the play_state of the video to pause, if not already.
"""
#pause only if play is set
if self.play_state is True:
print("Pausing")
self.play_state = False
def set_frame(self, value):
"""
Sets the current frame to process to the value passed in.
Parameters
----------
Value: float
Assigns the current_frame
"""
value = floor(float(value))
self.current_frame = value
self.cap.set(cv2.CAP_PROP_POS_FRAMES, value)
def previous_frame(self):
"""
Sets the current frame to process to the previous frame.
"""
self.set_frame(self.current_frame-1)
def next_frame(self):
"""
Sets the current frame to process to the next frame.
"""
self.set_frame(self.current_frame+1)
def add_tracker(self):
"""
Appends a Tracktor object to the trackers list.
"""
self.trackers.append(Tracktor())
def delete_tracker(self, index):
"""
!NOT COMPLETE!
Removes a tracktor object from the trackers list
"""
del self.trackers[index]
    #search the list of trackers by name and return -1 if not found
def find_tracker_index_by_id(self, name):
"""
Finds the index in trackers where the name matches the tracktor's id.
Parameters
----------
name: string
compared to the tracktor's id
"""
if name == "None":
return self.NO_TRACKING
elif name == "All":
return self.TRACK_ALL
else:
for i in range(len(self.trackers)):
if name == self.trackers[i].id:
return i
return -1
def set_tracker_offset(self, value):
"""
Sets the working_number tracktor's offset to the value passed in.
Offset is the constant subtracted from the mean value within the block
Parameters
----------
value: float
"""
self.trackers[self.working_number].offset = value
def set_tracker_blocksize(self, value):
"""
Sets the working_number tracktor's block_size to the value passed in.
block_size determines the width of the kernel used for adaptive thresholding.
Note: block_size must be odd. This is automatically handled.
Parameters
----------
value: float
"""
if value % 2 == 0:
value += 1
self.trackers[self.working_number].block_size = value
def set_tracker_minarea(self, value):
"""
Sets the working_number tracktor's min_area to the value passed in.
        min_area is the minimum area threshold used to detect the object of interest.
Parameters
----------
value: float
"""
self.trackers[self.working_number].min_area = value
def set_tracker_maxarea(self, value):
"""
Sets the working_number tracktor's max_area to the value passed in.
        max_area is the maximum area threshold used to detect the object of interest.
Parameters
----------
value: float
"""
self.trackers[self.working_number].max_area = value
def set_zoom(self, value):
"""
Sets the zoom to adjust region of interest on a specific tracktor
Parameters:
value: float
The zoom multiplier
"""
self.zoom = float(value)
def get_frame(self, tracking=0):
"""
Returns a processed frame based on what tracking value is passed in
Parameters
----------
tracking: int
determines what to track.
(-2: NONE, -1 ALL, 0...n working_number tracking index)
"""
if self.cap.isOpened():
#initialize ret to false so we enter the while loop
ret = False
            #if we cannot retrieve the frame, continue onto the next one
while ret is False:
if self.play_state is False:
self.set_frame(self.current_frame - 1)
#grab a frame
ret, frame = self.cap.read()
#use openCL on this data when it can.
frame = cv2.UMat(frame)
#set the current frame number to the frame we just received
self.current_frame = self.cap.get(cv2.CAP_PROP_POS_FRAMES)
if tracking == self.NO_TRACKING:
return (True, frame)
elif tracking == self.TRACK_ALL:
ret, final = self.show_all(frame)
else:
ret, final = self.process(frame, self.trackers[tracking])
ret, final = self.get_focused_frame(final, self.trackers[tracking], self.zoom)
if ret:
final = final.get()
                #when we retrieve a new frame, we can assume we updated values with it
return (ret, final)
else:
frame = frame.get()
print("unprocessed")
return(True, frame)
def get_focused_frame(self, frame, tracktor, zoom):
"""
Returns a frame centered and zoomed in on the
individual being tracked.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
tracktor: tracktor object
Contains data and basic functions for tracked individual
zoom: int
The value in pixels to be zoomed in.
This is the number of pixels to be zoomed in on all sides;
the original aspect ratio is adjusted.
"""
try:
frame = frame.get()
#create point from tracked individual
pos_x = int(floor(tracktor.meas_now[0][0]))
pos_y = int(floor(tracktor.meas_now[0][1]))
min_y = int(pos_y - (self.height/zoom))
max_y = int(pos_y + (self.height/zoom))
min_x = int(pos_x -(self.width/zoom))
max_x = pos_x + int(self.width/zoom)
if min_y < 0:
min_y = 0
if min_x < 0:
min_x = 0
if min_y >= 0 and max_y <= self.height and min_x >= 0 and max_x <= self.width:
roi = frame[min_y:max_y,
min_x:max_x]
roi = cv2.UMat(roi)
cv2.imshow("resize", roi)
return (True, roi)
else:
frame = cv2.UMat(frame)
return (True, frame)
#roi = cv2.resize(roi, (int(self.width), int(self.height)))
# # #calculate edges based on points
# min_x = int(pos_x - zoom)
# max_x = int(pos_x + zoom)
# min_y = int(pos_y - zoom)
# max_y = int(pos_y + zoom)
# #keeping aspect ratio solves constant oblongness
# original_aspect = self.width/self.height
# zoomed_aspect = (max_x - min_x)/(max_y - min_y)
# print(zoomed_aspect)
# #difference between ratios needed to change
# adjust_aspect = zoomed_aspect - original_aspect
# #ratio is applied to current height
# adjust_height = (max_y - min_y) * adjust_aspect
# #ratio is applied to current width
# adjust_width = (max_x - min_x) * adjust_aspect
# #when height ratio is off
# if original_aspect > zoomed_aspect:
# #subtract half the ammount needed to meet original aspect
# min_y = int(min_y - (adjust_height/2))
# #add half the ammount needed to meet original aspect
# max_y = int(max_y + (adjust_height/2))
# #when width ratio is off
# elif original_aspect < zoomed_aspect:
# #subtract half the ammount needed to meet original aspect
# min_x = int(min_x - (adjust_width/2))
# #add half the ammount needed to meet original aspect
# max_x = int(max_x + (adjust_width/2))
# NOTE: CAUSE OF DISTORTION, we need the outer edge to stop moving as well
# #limit zoom to video edge
# # region of interest
# roi = frame[min_y:max_y, min_x:max_x]
except:
print("Cannot focus frame")
frame = cv2.UMat(frame)
return (True, frame)
def show_all(self, frame, detail=True):
"""
Returns a frame that shows all of the tracked individuals.
Parameters
----------
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
detail: bool
determines whether or not to display contours,
min_area circle and max_area circle.
"""
#iterate through all
try:
final = frame
ret = True
for i in range(len(self.trackers)):
#accumulate tracker's processes onto final frame
ret, final = self.process(final, self.trackers[i])
if ret is True and detail is False:
cv2.circle(frame, tuple([int(x) for x in self.trackers[i].meas_now[0]]), 5,
self.trackers[i].colour, -1, cv2.LINE_AA)
if detail is True:
return (True, final)
else:
return (True, frame)
except:
print("cannot track more than one individual")
            return (False, frame)
def process(self, frame, tracktor):
"""
        This function takes a frame and a tracked individual and performs operations
        on the frame, applying information such as x, y coordinates to the tracktor.
        First it applies a threshold, then erodes and dilates to reduce noise.
        Before measuring contours, it records the previous coordinates of the tracker.
        Second, it applies contours to each clustered individual.
        Last, hungarian_algorithm calculates the minimum cost between frames to continue
        tracking, and reorder_and_draw draws the center dot and min/max area circles.
Parameters
----------
tracktor: Tracktor Object
The object containing all the data to be processed
frame: ndarray, shape(n_rows, n_cols, 3)
source image containing all three colour channels
"""
try:
if len(self.track_history) > 0:
self.set_tracker_pos(tracktor)
#eliminate small noise
thresh = tracktor.colour_to_thresh(frame)
# cv2.imshow("thresh", thresh)
thresh = cv2.erode(thresh, tracktor.kernel, iterations=1)
# cv2.imshow("dialate", thresh)
thresh = cv2.dilate(thresh, tracktor.kernel, iterations=1)
# cv2.imshow("erode", thresh)
#x, y coordinates of previous tracktor if meas_now is not empty
if tracktor.meas_now:
pos_x = tracktor.meas_now[0][0]
pos_y = tracktor.meas_now[0][1]
else:
# self.pause()
print("Unable to track " + tracktor.id)
#from our current frame, draw contours and display it on final frame
final, contours = tracktor.detect_and_draw_contours(frame, thresh.get())
# cv2.imshow("detect_and_draw", final)
#detect if the tracker is changed
changed = self.tracker_changed(pos_x, pos_y, contours)
if changed is True:
# self.pause()
                print(tracktor.id + " has changed")
row_ind, col_ind = tracktor.hungarian_algorithm()
#try to re-draw, separate try-except block allows redraw of min_area/max_area
final = tracktor.reorder_and_draw(final, col_ind, self.current_frame)
return (True, final)
except:
print("Cannot Process Frame.")
return (False, frame)
def tracker_changed(self, pos_x, pos_y, contours):
"""
NOTE: Function name needs a change.
This function checks if the (pos_x, pos_y) coordinate passed in exists
within the contours that are passed in.
This can either be used to select and assign contours to a tracker,
or check if tracker has changed from it's last position to new contours.
Parameters
----------
pos_x: float
x coordinate on frame
pos_y: float
y coordinate on frame
contours: list
            a list of all detected contours that pass the area-based threshold criterion
"""
#assign default flag to True (assume changed until proven not)
changed_tracker_flag = True
#if contours exist (not empty)
if contours:
#we look at all the contours
for contour in contours:
#check if previous position exists in updated contour (1= Yes, -1= No)
dist = cv2.pointPolygonTest(contour, (pos_x, pos_y), False)
# print(dist)
#if previous point exists in the same contour, set changed flag to false
if dist != -1.0:
changed_tracker_flag = False
if changed_tracker_flag is True:
print("changed contours")
return changed_tracker_flag
# if no contours exist, we cannot process anything
else:
print("Unable to track ")
return changed_tracker_flag
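    # Self-contained illustration (not part of the original class) of the
    # cv2.pointPolygonTest convention relied on above: it returns a positive
    # value inside the contour, 0 on the edge, and -1.0 outside, so
    # "dist != -1.0" means the point did not leave the contour.
    #
    #     square = np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=np.int32)
    #     cv2.pointPolygonTest(square, (5, 5), False)    # ->  1.0 (inside)
    #     cv2.pointPolygonTest(square, (20, 5), False)   # -> -1.0 (outside)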
def export_all(self):
"""
        Iterates through the video collecting the data of each tracktor in the trackers list.
Once data is collected, it exports it in a Pandas dataframe with the frame number,
x and y coordinates.
Each individual exports it's own CSV file.
"""
#self.set_frame_pos(1)
#print("setting fame to start:" + str(self.current_frame))
#sets the process to process ALL
self.working_number = self.find_tracker_index_by_id("ALL")
ret = True
#we want to process as fast as we can(1000 fps should be good)
self.cap.set(cv2.CAP_PROP_FPS, 1000)
self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame)
#we want playstate to be true so get_frame will work
self.play_state = True
#reset all tracktor's data
for i in range(len(self.trackers)):
self.trackers[i].df = []
# while self.current_frame < self.length:
while self.current_frame < 1030:
# Get a frame from the video source, already processed
ret, frame = self.get_frame(self.working_number)
print("loading: " + str(int(self.current_frame)) + " of "+ str(int(self.length)))
#frame already processed, retreive data from that frame, store it in each trackers
for i in range(len(self.trackers)):
#ignore duplicate frame
if len(self.trackers[i].df) > 1:
                    last_frame = self.trackers[i].df[-1][0]
#it is the first frame and we can simulate the previous_frame
else:
last_frame = self.current_frame-1
#try to append data
try:
#if we have a new frame, append it
if self.current_frame != last_frame:
self.trackers[i].df.append([self.current_frame,
self.trackers[i].meas_now[0][0],#store X coord
self.trackers[i].meas_now[0][1] #store Y coord
])
#we received bad data and cannot process it. return -1
except:
print("Could not get location from " + self.trackers[i].id +
" at frame " + str(self.current_frame)
)
self.trackers[i].df.append([self.current_frame, -1, -1])
self.cap.set(cv2.CAP_PROP_FPS, self.FPS)
print("Starting to export....")
#once done processing the video (last frame complete), export to file
for i in range(len(self.trackers)):
print("Exporting: " + self.trackers[i].id)
#load our data into a pandas dataframe
self.trackers[i].df = pd.DataFrame(np.matrix(self.trackers[i].df),
columns=['frame', 'pos_x', 'pos_y'])
#export the data into a csv file
self.trackers[i].df.to_csv(self.output_path + "csv/" + self.trackers[i].id + ".csv")
# Release the video source when the object is destroyed
def __del__(self):
if self.cap.isOpened():
self.cap.release()
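# Usage sketch (illustrative only; assumes a readable video file and at least
# one Tracktor configured with a colour threshold):
#
#     vc = VideoCapture("videos/sample.mp4")
#     vc.add_tracker()                # appends a Tracktor to vc.trackers
#     vc.play()
#     ret, frame = vc.get_frame(0)    # process the next frame with tracker index 0
#     vc.export_all()                 # writes one CSV per tracker under ./output/csv/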
|
py | 1a48549e0992edb2082aa8f3c599afdb2b2a2fdb | def maxSubArray(nums):
max_sum = nums[0]
for i in range(len(nums)):
now_sum = 0
for j in range(i, len(nums)):
now_sum += nums[j]
max_sum = max(now_sum, max_sum)
return max_sum
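# For comparison only (not part of the original snippet): Kadane's algorithm gives
# the same maximum subarray sum in O(n) instead of the O(n^2) double loop above,
# which matters for an input as large as `nums` below.
def max_sub_array_linear(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        # either extend the running subarray or restart it at the current element
        cur = max(x, cur + x)
        # keep the best sum seen so far
        best = max(best, cur)
    return best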
nums = [-57,9,-72,-72,-62,45,-97,24,-39,35,-82,-4,-63,1,-93,42,44,1,-75,-25,-87,-16,9,-59,20,5,-95,-41,4,-30,47,46,78,52,74,93,-3,53,17,34,-34,34,-69,-21,-87,-86,-79,56,-9,-55,-69,3,5,16,21,-75,-79,2,-39,25,72,84,-52,27,36,98,20,-90,52,-85,44,94,25,51,-27,37,41,-6,-30,-68,15,-23,11,-79,93,-68,-78,90,11,-41,-8,-17,-56,17,86,56,15,7,66,-56,-2,-13,-62,-77,-62,-12,37,55,81,-93,86,-27,-39,-3,-30,-46,6,-8,-79,-83,50,-10,-24,70,-93,-38,27,-2,45,-7,42,-57,79,56,-57,93,-56,79,48,-98,62,11,-48,-77,84,21,-47,-10,-87,-49,-17,40,40,35,10,23,97,-63,-79,19,6,39,62,-38,-27,81,-68,-7,60,79,-28,-1,-33,23,22,-48,-79,51,18,-66,-98,-98,50,41,13,-63,-59,10,-49,-38,-70,56,77,68,95,-73,26,-73,20,-14,83,91,61,-50,-9,-40,1,11,-88,-80,21,89,97,-29,8,10,-15,48,97,35,86,-96,-9,64,48,-37,90,-26,-10,-13,36,-27,-45,-3,-1,45,34,77,-66,22,73,54,11,70,-97,-81,-43,-13,44,-69,-78,30,-66,-11,-29,58,52,-61,-68,-81,25,44,-32,57,-81,66,2,52,43,35,-26,16,-33,61,-37,-54,80,-3,32,24,27,30,-69,38,-81,2,-4,47,17,5,42,-58,-51,-90,98,-33,76,-22,95,-4,89,-31,-87,-44,-69,-48,1,87,48,-90,-12,-24,39,18,-86,35,96,-14,-41,13,90,-98,32,-83,-89,7,-17,63,84,-21,-40,51,24,-51,83,31,0,-38,-5,-74,-29,59,1,87,-22,-9,-1,-49,76,57,41,44,35,-27,60,23,56,-80,-14,41,-2,22,-31,99,47,-48,7,-75,13,-97,-50,61,61,27,48,-84,94,-76,-56,70,57,84,-9,-7,-66,-49,-84,89,-29,-22,7,45,-99,75,21,24,-95,-71,48,17,-92,74,-22,45,1,-97,61,-5,-74,81,-57,83,42,33,-47,75,61,-55,41,-68,22,-51,53,-1,-99,-25,-76,-95,3,48,-1,-13,23,53,-68,-76,33,92,-4,35,50,38,18,-8,-52,47,-33,-91,91,85,-60,14,-89,93,89,-89,-55,89,92,47,38,-9,-66,-39,-79,-58,-39,53,-65,56,-11,61,-29,83,-46,19,31,-3,27,-1,-18,67,-87,-8,37,79,-20,58,68,-28,-18,-17,39,-8,43,59,33,81,13,44,37,-98,6,85,84,59,4,-8,-44,-69,91,15,74,80,83,-12,59,-37,-54,5,34,27,87,-50,-81,8,-90,52,-11,-1,-4,-97,0,78,87,-39,37,-32,30,70,-1,21,-38,-50,-22,-55,15,-85,8,60,19,-81,-35,-17,-31,-40,90,-45,-88,-44,53,-15,-41,-70,-37,-77,-33,77,-9,96,24,66,-6,85,92,72,-70,7,86,14,-32,-18,33,9,64,78,68,32,-90,57,87,62,-58,-77,68,-19,-54,-65,-42,13,-68,58,-44,25,43,-52,-26,73,55,-63,-13,-77,18,96,31,-40,51,-1,91,60,-44,55,22,-26,78,-10,32,-99,2,66,13,33,25,68,-65,-32,-84,-14,-82,70,22,5,69,-59,-22,-23,0,-70,53,-32,89,85,-77,-11,-40,77,55,68,77,-43,34,-33,66,-41,-88,-98,27,-72,-13,21,74,85,-74,21,-74,-19,97,2,10,50,46,-1,13,69,87,72,23,20,40,1,76,-49,67,43,10,79,21,-86,83,84,34,34,69,37,-45,72,-82,-70,-26,27,56,97,-97,-31,66,67,-82,-11,-13,57,66,-37,85,11,82,-5,-33,3,-15,-50,-13,95,60,-66,9,-84,-94,26,-78,-44,-70,77,-47,-90,-53,95,76,-36,-38,-60,98,-72,-21,83,15,-38,-45,81,41,16,-69,-94,11,91,-84,-79,83,-79,23,-95,-24,30,58,6,39,-95,1,-8,-54,62,31,-56,67,86,-96,-18,-75,-42,-36,66,73,-29,48,-39,-61,63,-42,98,60,81,-97,-64,11,61,18,-73,42,-80,18,87,58,-51,-69,2,-88,-66,84,-63,-32,-75,79,-82,-28,27,-21,11,-33,13,9,-73,-6,-11,-61,81,-73,57,-92,45,53,25,33,11,50,40,90,62,51,74,75,-81,75,54,-86,-53,-42,-8,34,1,-95,-79,27,-24,-14,42,-66,12,-24,-58,-66,-71,43,66,17,-29,-16,7,-90,-65,-42,84,-70,-90,15,-57,-67,49,11,67,-50,-7,64,53,68,-50,-5,78,38,71,96,71,76,40,15,-7,87,98,76,96,-90,-66,57,-61,-57,-51,-41,-47,97,69,-80,-53,-61,83,76,83,-90,-29,62,47,-81,58,18,95,-2,-67,-12,-38,-92,-35,-65,-83,-25,91,-44,-5,-83,-9,47,-86,-40,43,-63,-1,3,-87,-18,12,-39,-79,-41,-21,79,53,-26,-46,63,39,16,70,80,50,87,-45,19,-80,26,35,10,-27,26,46,92,62,-55,-5,52,4,-93,-87,1,-58,-9,-20,95,42,34,58,-19,-73,5,-39,53,-31,-8,-28,-12,95,84,97,-55,10,44,-62,-51,65,32,-99,-54,16,89,47,57,-42,-96,52,99,14,-13,-43,40,69,-6,-6,-62,85,42,26,80,26,0,-74,-87,-79,-60,-38,63,71,-61,85,-13,-71,9,
-78,-14,13,50,-38,-73,-85,18,44,83,-88,-85,-79,73,56,23,31,-40,-99,33,-51,97,72,-13,60,20,26,46,84,31,-45,-94,93,67,55,-45,71,69,49,15,52,37,29,50,-13,-38,-50,-82,-2,-73,27,47,-75,-24,-66,84,96,36,7,80,-56,62,62,-63,6,17,-32,-46,-13,93,45,-84,30,-26,42,-82,13,92,-88,-89,-81,16,34,-57,91,45,-95,87,-42,11,44,2,-50,6,15,33,-76,83,86,-13,76,32,-21,-16,82,-78,-22,-28,90,-34,-40,-91,81,93,-71,73,15,-90,37,73,-3,-41,-48,47,64,66,-43,64,49,-57,-72,3,51,7,63,11,28,-82,82,18,-17,-58,3,-58,-87,8,-85,27,17,28,-23,-85,86,28,38,28,-5,94,-31,-79,-86,-3,0,65,80,-60,-24,8,-43,-65,-97,40,-23,-18,81,-11,90,72,92,-16,0,-30,-25,-36,97,-87,68,-31,83,-63,-33,97,10,66,39,-10,-93,91,74,-37,-74,53,79,-21,-64,37,67,-74,9,60,9,86,-70,84,-73,-96,73,94,-50,57,-69,16,31,18,-18,-53,-92,-35,-62,59,5,-60,12,-16,19,47,-78,-14,49,7,-77,-64,-7,-71,96,19,-67,69,-10,-18,3,-2,97,-89,-84,-44,-43,99,-2,-6,58,-97,11,-29,-14,-70,94,-16,-8,44,91,15,79,-39,20,75,57,52,21,-53,-89,-98,44,84,-88,36,-82,-31,36,15,39,-29,17,-50,41,79,-21,13,-36,71,-66,-68,-37,89,-8,82,41,-74,12,-38,-50,-1,-37,70,-39,-48,7,-22,20,-57,69,-41,13,-14,-14,-68,-58,64,21,5,12,54,13,51,43,-94,11,-16,-92,99,22,-43,-2,62,-72,58,-86,11,-87,33,53,81,68,-57,-56,-46,-49,-14,95,71,67,-16,2,-19,-87,-78,-37,0,-18,-30,-1,-95,4,96,66,31,32,79,-81,44,-11,48,3,-66,90,46,-12,-81,-91,-40,66,76,20,-54,-43,9,-33,19,-91,49,88,7,30,-8,-19,-4,99,-87,-48,-82,33,40,65,-64,73,33,59,-62,28,67,-26,-29,43,71,16,99,-20,83,18,-11,9,-16,72,-61,52,-47,34,29,-58,85,23,75,2,-34,87,-48,75,46,-33,3,-9,40,73,-66,-12,-10,-89,68,-50,5,-66,58,88,82,96,18,-64,7,-53,-23,-31,69,-71,47,-88,-83,98,86,39,-35,-34,-70,82,-60,-36,-30,6,-26,-85,55,55,-75,-10,44,84,-37,-38,-80,69,-15,-27,-85,-69,-21,61,-57,-5,59,-71,-66,-98,-5,-59,60,11,4,-93,93,54,98,48,9,99,-85,-70,83,-23,-32,79,-77,52,-47,-63,60,8,97,-97,-97,33,-92,-87,11,-21,-47,-29,66,33,-45,59,-36,-47,-16,50,-48,-2,79,-64,51,-75,-85,73,76,-56,-90,13,51,83,-8,30,17,-23,20,-72,55,49,-24,-1,-17,7,-42,23,59,42,-27,87,-83,-47,99,68,-46,91,18,-93,-88,28,20,40,-12,-88,-30,-95,-12,66,-90,-79,16,-38,19,75,68,76,-2,27,-5,71,-9,12,-99,-32,-43,-46,-41,74,-40,-53,-21,79,86,67,68,-66,48,-67,99,57,-47,15,-81,71,-33,86,25,65,-10,96,36,58,-15,13,-74,41,66,-39,-7,-97,7,71,59,-6,15,27,4,-36,59,3,-79,89,95,-83,37,-38,79,-38,-96,-53,-41,39,-95,43,-71,-93,-38,71,-33,54,74,50,2,10,-79,-82,-86,24,-19,49,-95,1,38,99,-6,-24,-62,-26,14,-58,20,49,57,1,-7,63,-16,31,34,50,-15,-15,-23,86,94,-2,-96,-92,98,-39,34,-97,62,-28,78,-67,24,93,6,-61,-65,-97,87,68,-20,-43,31,63,87,-57,-10,-51,27,67,-87,-1,-35,-84,-17,-60,-23,-83,-57,-84,-34,-79,-52,89,-86,31,-95,-75,10,69,70,90,-97,1,53,67,43,-56,-84,-52,87,-72,46,-71,-79,-71,-32,-26,-77,10,-34,-12,8,-10,-46,-2,-79,-41,0,8,-95,-30,-2,83,47,-72,50,-9,-29,43,15,-65,70,-39,-37,67,-34,31,-59,-12,-82,6,75,25,96,-70,-99,93,-35,0,1,-54,69,75,-71,16,-96,56,83,-49,-1,-2,-14,-31,35,48,-86,-98,-21,-46,-34,-3,37,-58,98,10,-52,98,3,-11,-2,81,11,-33,56,16,60,36,-28,43,87,47,-81,-50,93,53,97,-93,31,-46,-40,97,27,73,-84,25,-17,-60,1,63,5,98,44,-84,-57,-23,8,79,90,57,22,54,4,17,-96,-3,-29,-99,3,78,-69,40,52,57,13,67,-40,73,83,60,36,-12,35,-43,-20,54,10,88,33,0,45,-67,-46,-51,49,-43,23,96,-65,-74,52,-35,42,4,99,-67,-28,-41,-94,-45,-81,18,43,53,74,99,-15,-39,87,-82,61,9,-73,91,58,76,-74,-19,49,-63,-17,1,1,-97,-94,-23,-65,-46,35,-83,8,53,34,-72,-16,-15,-95,68,45,91,62,-17,1,89,-48,-64,42,-46,-7,-9,-10,52,69,67,54,74,-55,65,-72,79,58,12,10,-31,17,70,53,21,38,-24,-11,-23,35,89,-34,86,-98,-92,-60,-6,-24,6,-53,-55,-26,77,-81,18,20,-77,-26,-22,11,60,47,-72,30,-23,25,-55,5
2,-85,22,-12,80,87,-49,59,72,-32,-47,-52,73,-24,-8,-76,-69,-13,18,50,9,92,-95,96,52,51,-98,-40,-71,26,4,57,17,-74,-78,-25,90,-50,-66,39,17,-37,86,-33,39,-45,-9,69,41,-91,-4,-73,77,0,-77,7,-48,-76,66,-43,50,-30,90,-56,-27,-87,-5,-37,-38,28,-98,55,91,64,-78,7,-81,12,-47,36,-2,48,62,-25,-75,84,81,-47,-91,24,-14,35,94,-23,78,-56,-34,-49,-17,27,78,-16,-18,46,-75,-20,-70,-80,92,-18,55,-10,-93,17,41,-68,1,0,-39,-14,-76,47,-79,94,-76,76,-62,-11,-73,20,92,81,80,-49,28,-95,30,34,-99,22,-83,55,88,99,-28,7,-69,50,-93,-8,-64,-93,-61,-66,-98,-61,86,-61,27,-87,59,-4,70,16,46,-25,-2,-24,-90,-2,75,-74,-46,40,-98,2,-53,-67,-48,-70,1,-35,-63,16,-2,-62,31,-39,-47,-65,-27,88,30,-80,5,-24,-5,-97,51,4,0,26,6,30,-33,7,-67,-10,16,-39,20,93,25,56,-14,99,70,-83,-40,-77,-49,9,-88,80,29,16,-67,-99,-5,84,-19,71,-13,86,2,30,-30,11,-79,63,71,17,33,-26,-27,-80,-27,-57,-87,10,-35,-36,95,-47,-79,1,45,-69,1,-60,-85,81,-88,-22,44,-10,85,91,-99,-94,31,48,-1,-36,-78,71,-40,-28,90,-27,58,-68,13,53,-15,10,-45,-70,40,32,-30,31,-9,-42,86,-65,24,71,-97,24,53,33,-51,-48,97,-29,99,-66,42,89,6,0,-79,95,-70,5,6,-39,12,-54,93,58,54,-16,92,40,-5,16,11,-25,-83,-59,-92,-35,-8,81,35,-9,-84,-46,-43,-2,30,-23,-6,60,59,99,97,-29,-78,90,-94,52,-49,97,-8,23,13,79,97,6,-80,-95,70,-12,63,-17,55,55,36,-88,-47,-56,-34,23,-96,-98,22,-99,-28,21,68,-46,-50,95,-49,42,18,40,-2,15,-54,-5,-3,-84,82,-63,-25,15,91,-88,3,-56,-68,68,67,-88,69,-34,88,-82,63,56,-29,-86,52,-2,32,-53,-62,-70,62,-17,1,-64,-24,-39,-28,50,75,-37,38,-22,-17,69,-53,-73,80,92,-30,69,-89,-67,2,-42,-77,-69,56,31,-22,93,61,-83,-46,-61,-48,6,-1,23,-67,-26,62,48,29,-55,17,52,-51,-25,44,18,-79,31,27,22,89,50,53,22,-42,-92,-8,-81,-76,22,-65,-25,-72,33,74,-62,84,13,85,13,57,2,-58,82,53,62,0,73,-6,-72,-27,-40,54,-74,58,-88,-90,-50,-92,-67,72,-81,-16,76,51,-65,-86,35,47,98,-75,-19,-22,-57,-36,-69,-94,40,-95,-24,67,-46,35,-2,-44,-7,-13,-35,19,-29,-3,-9,-11,57,-55,-83,91,-42,29,38,-43,53,95,34,73,-41,41,78,99,22,-46,43,75,65,-81,-69,-65,-18,-5,53,29,68,-78,-82,25,-34,-89,-7,23,39,-69,56,-30,-96,-33,-57,-38,-91,97,-39,30,-49,81,6,92,99,36,-73,-42,-68,56,86,76,54,80,2,96,90,94,20,7,-97,-47,76,-94,20,-81,-56,28,-84,-18,-42,-57,-37,40,-88,-61,-23,-62,-4,-15,70,-18,-39,2,-61,39,-2,-71,34,94,35,13,-52,-12,18,67,-17,38,-28,-25,-80,6,17,-18,-53,5,-3,0,42,92,61,-10,-49,-78,91,-11,61,-11,-5,-28,-16,-93,84,8,-5,-21,-48,54,-83,0,-70,-86,-94,23,-5,-71,-71,92,5,47,61,-34,-63,89,-35,-95,-22,-74,-29,49,-26,31,33,-42,-61,-95,13,-10,58,6,89,87,19,71,-12,91,77,16,60,-18,-37,21,25,-23,10,89,-42,65,91,28,-9,-35,-41,-76,-1,-26,-72,88,40,63,-6,6,50,90,-45,-62,81,-68,30,41,-10,93,-61,-85,-53,26,80,4,-9,71,-90,58,-64,-55,82,11,19,86,-1,-64,49,70,42,-23,60,96,-9,18,-72,-78,-41,-6,91,-26,9,-62,99,-11,41,-33,-62,50,-74,-27,95,84,61,-9,70,-40,26,-3,-93,-55,73,66,-59,-59,-16,-55,-38,19,39,-47,93,-52,-10,69,13,-91,-63,50,35,-38,-99,7,-54,61,74,92,97,-22,-11,-95,22,-61,47,63,-20,-91,-92,18,27,23,71,-3,47,-62,-33,-39,-77,-20,87,35,41,87,-81,63,25,93,32,23,-29,98,4,92,-63,-72,32,-7,-64,17,-88,40,-60,59,-86,87,73,-43,-75,73,36,-88,8,-46,99,3,-83,1,-4,26,-99,43,24,-19,13,60,9,-55,-69,44,61,-81,-39,78,54,-25,65,4,31,89,-23,-55,77,61,-2,53,-35,-8,-45,37,-82,-45,-19,41,36,93,-22,-78,-85,8,65,76,3,-96,54,-43,-45,-4,61,62,-38,-62,-93,-61,76,-18,69,-82,73,-76,54,67,-45,-88,8,67,81,62,88,96,-52,54,49,50,34,-20,84,88,52,45,50,-86,59,57,-71,35,-84,97,29,88,97,-16,55,-47,-28,-60,-80,-46,78,-91,-73,-74,39,52,53,-50,-68,37,-62,60,-18,64,73,-82,-2,78,30,13,53,-41,-22,50,19,-90,79,91,-51,76,-78,-95,61,-75,-70,-23,76,59,26,84,-4,40,44,54,-19,-6,72,79
,-51,2,-8,-98,37,47,29,-43,56,-15,-75,-94,-39,-77,86,98,-53,-84,-25,99,75,77,60,-52,-6,-19,-97,75,74,74,54,-77,-47,-77,-98,66,69,30,-77,26,-85,-76,8,-47,-54,-6,-49,-31,-14,3,-55,-62,-20,-95,-14,51,-15,-35,26,-64,-84,-43,-41,-32,-44,-63,-89,-97,66,-89,28,57,-66,-87,-90,-43,-17,-39,2,45,40,47,83,96,51,-54,47,-86,10,-50,-51,2,6,-16,46,62,20,56,64,-14,66,-31,-56,77,-42,-70,-66,17,-33,12,-38,-93,-41,-78,-96,87,-56,27,-99,30,77,-51,-68,-40,33,77,98,-70,34,39,16,0,-92,36,-23,-58,65,-13,35,-67,99,97,-84,-65,95,-81,-78,-60,23,98,69,0,-52,-98,59,57,78,58,86,-11,-3,-21,89,-18,91,-57,0,57,7,-64,66,-17,-90,81,17,-95,77,16,-79,0,14,90,99,38,68,35,-28,23,-30,-64,-87,67,14,-98,-74,6,-79,25,-60,4,37,82,86,46,63,-19,28,40,96,48,-60,-13,15,-84,-74,-17,28,-3,-93,97,9,95,41,-99,96,66,6,93,-31,22,-2,82,4,-16,29,-56,41,-66,84,37,58,-99,-75,-26,93,-73,33,21,0,16,18,-90,11,-63,-90,-16,-97,-8,-45,-52,-86,52,-69,-6,-87,36,37,54,69,-2,-32,27,-1,-8,77,-31,-5,-12,66,95,80,-39,-95,-31,-3,90,52,0,-18,-93,47,-28,35,54,65,25,-10,-21,-21,-41,77,46,63,-47,-84,17,-2,10,-95,-36,5,85,24,-14,-46,-78,-24,82,-2,34,66,-78,-94,-22,76,47,-97,-34,-96,-42,2,57,81,-58,-90,96,58,7,-17,40,47,65,2,-29,-72,55,-31,-19,14,66,-85,-43,65,97,35,41,21,14,83,24,72,-38,-19,53,3,-33,26,-61,73,85,78,-3,50,-20,68,78,-88,-63,-41,2,80,-50,59,45,-53,-6,-37,68,84,-77,-31,56,-38,27,-14,64,93,88,79,44,74,57,-59,24,-86,-91,-21,-75,-77,14,4,79,41,-37,24,87,33,63,32,17,62,78,-49,-76,5,36,65,-2,25,44,-58,-24,-21,-40,76,-8,-32,-44,-6,-33,46,97,-54,-13,-63,46,-48,69,9,60,-37,-28,38,13,-5,-57,-73,-63,18,28,81,59,-96,-40,-81,79,28,-36,-88,98,7,58,72,53,-78,-91,-1,-27,54,85,-66,-82,-66,48,7,5,91,33,42,9,-62,0,-55,-59,36,-59,-79,-36,-19,-68,-60,87,66,-88,17,88,97,93,-62,51,55,-52,45,88,96,-47,-7,64,62,-88,-50,-99,11,-6,-82,-53,11,-62,-12,68,-53,27,33,-87,38,-50,77,12,-80,92,-36,74,-60,-91,39,-87,-62,-90,76,77,-79,-74,54,9,-3,71,55,84,86,-57,53,-67,46,-14,-78,-38,12,76,73,9,68,-86,-40,-92,-77,99,97,-63,85,73,-86,-94,76,44,-9,-50,16,-53,-89,2,-34,63,34,89,-74,32,-49,15,8,-76,-99,-24,-62,-40,-39,-63,-41,-42,-50,-56,-92,-59,-73,60,84,17,-90,0,40,97,78,83,37,-11,72,40,-78,-77,-45,29,-77,-45,82,-63,-9,-80,-50,50,-46,0,70,-39,17,73,98,1,-32,-92,78,84,81,56,67,19,-54,39,-41,-33,38,13,72,38,44,31,51,-65,50,-98,61,-96,-22,9,-58,94,-41,-60,-5,26,-76,-27,11,-94,-70,-45,24,-48,71,82,18,-14,-28,-33,-76,92,98,75,-96,48,53,42,29,-69,-49,47,-75,-14,86,-4,-87,86,69,0,68,75,54,-8,-73,2,-49,21,88,-1,87,-88,-9,62,63,-5,-12,16,-63,-83,46,-36,40,47,49,26,-56,38,-11,89,-85,-42,41,46,26,44,-52,77,-58,-64,-24,-94,-52,44,68,87,-61,-44,4,-48,-51,-73,-8,65,51,-82,-9,71,56,56,60,70,-86,-22,-7,40,-78,41,-6,-60,76,46,-55,-99,-10,-87,65,5,-55,-31,33,-30,-28,-75,-65,99,-57,2,70,75,-64,7,22,-51,84,-84,65,82,56,-64,-78,9,82,-33,10,-28,-44,-25,54,-22,20,-13,24,68,12,36,68,31,-62,38,6,-27,-54,-72,-1,-93,-57,-59,89,75,-23,87,-15,-64,-69,71,7,-36,-77,-62,18,19,25,-58,-13,-63,77,-68,44,92,47,-50,-58,69,-23,17,75,-3,58,41,-28,-88,6,33,-53,36,4,30,99,3,68,-6,-78,-7,36,-14,6,-10,17,-50,-18,-36,-24,24,-67,29,-59,85,-74,75,26,-25,86,-68,-92,-67,45,-11,63,21,91,8,-84,90,77,51,-24,-17,-59,92,9,0,-66,84,-99,-34,-10,-82,-72,16,93,31,67,56,39,51,89,-16,-60,29,-94,-91,-86,97,98,90,25,-26,-50,42,-57,58,-58,-24,19,-58,19,-91,-63,46,1,-70,-23,-32,85,-83,-80,51,0,-64,-43,-18,-56,-30,-21,-58,-40,80,-8,9,23,35,-56,64,-89,39,83,29,48,-80,-24,-51,-74,29,-6,87,45,13,39,-77,48,72,4,69,-80,60,87,-21,40,-20,65,-60,-85,85,81,-98,25,64,31,-50,60,83,-2,62,12,68,49,-42,-19,-35,-20,-93,-85,60,75,-66,-3,62,-11,-85,-81,-69,-46,-67,-83,-6
5,-65,41,75,42,67,12,25,-35,-26,-63,-66,-99,-29,-9,-58,27,-26,-44,-12,-74,-11,61,65,78,75,83,-91,-93,93,-98,-59,-95,19,93,46,-14,5,-52,28,56,-39,38,56,32,-94,97,-41,-20,-69,23,5,19,-38,-30,-26,-63,-69,-40,-57,-76,-62,-39,-72,57,-46,50,-57,58,97,47,-9,-42,-15,-53,66,-9,-78,-97,70,-48,2,-48,47,63,-1,-78,-99,29,-42,-80,52,-5,-20,56,-48,10,6,-28,-31,-20,95,59,37,-19,83,-19,71,-95,-17,18,-67,61,46,79,25,-55,77,2,50,-88,21,2,7,78,-65,35,-12,40,83,33,-80,79,-30,34,-63,-47,-85,84,-66,-26,2,-34,-65,-75,-78,36,-30,76,-62,-80,87,59,-1,-29,37,33,83,-75,-49,66,58,-53,22,-72,57,58,-43,48,42,-10,-78,-79,32,-66,-54,30,69,-8,6,-92,-12,-29,43,63,41,-43,-3,24,-19,24,-32,-84,70,89,-80,5,48,-24,-47,-33,42,-25,-12,-49,-15,10,81,-69,-98,-36,-85,-11,34,57,-47,-47,-86,26,76,-28,-73,-79,-36,73,-89,-16,-22,35,36,31,78,-44,82,-34,6,-33,75,-36,-26,53,5,-11,-80,-84,-77,-28,-32,-63,74,-78,-15,-99,-58,48,73,-71,-91,-48,40,22,59,19,77,41,61,-40,84,14,1,-42,-33,-94,46,-37,-79,69,34,-34,82,-15,-13,-56,-15,5,68,-64,11,77,-36,-49,-24,-77,46,-47,63,8,-11,47,98,89,-95,-58,71,28,5,69,-3,-61,-65,-44,1,-2,-1,85,-97,-56,97,-10,-79,-39,41,-4,-17,-13,25,-54,71,91,92,69,57,97,88,29,2,-7,-2,98,8,9,-69,-91,83,6,71,62,49,68,-47,47,-70,93,-80,12,-43,22,58,-94,13,50,51,-30,24,39,75,-74,-68,-50,-99,17,35,-92,25,18,-10,-4,-19,-60,-35,33,63,-6,3,82,82,59,4,40,41,93,-32,-7,-59,68,-91,-84,71,-82,-57,48,34,77,56,-64,-4,-77,32,76,-38,73,9,-75,-56,-88,84,-74,47,-12,66,-34,-41,-89,35,-1,78,43,-9,49,37,33,-25,-29,11,-92,7,83,-70,-84,59,-8,88,-55,-7,-91,-67,-23,-66,79,42,76,-78,77,86,56,-24,65,-23,43,-9,-86,-23,65,-38,64,49,68,47,79,60,6,-29,25,27,40,33,59,-82,66,15,36,20,37,13,6,-30,65,-52,46,8,16,60,61,-42,-78,25,-92,66,-51,86,26,54,-66,-49,-19,73,60,-83,67,3,32,3,-77,-31,92,29,38,34,76,-38,-57,-31,-78,80,27,-80,6,34,85,54,20,-12,-14,53,15,43,26,-25,60,-29,54,-31,50,77,37,43,6,-48,-46,-41,13,-4,28,11,-46,-68,30,36,65,-8,-10,-15,56,52,-85,-52,-27,17,-1,-67,87,-46,-22,38,-69,-85,-19,13,-57,34,48,33,-92,-47,-56,-62,-16,51,73,-51,-80,-60,10,53,92,24,-99,-35,-58,0,-26,-71,30,51,66,60,42,-76,-50,61,35,97,-6,19,-49,15,56,34,-57,6,60,-38,45,-30,91,37,71,92,78,-87,-31,-71,-82,98,79,61,35,-2,61,84,-63,-27,81,30,68,-91,-78,24,43,-36,-93,3,3,52,49,-6,-11,20,-37,-55,9,31,-27,4,6,-70,-35,-59,27,-97,-75,40,-24,-93,-29,-56,91,-31,45,34,10,51,-86,89,3,63,-17,69,-40,23,-86,69,-46,-14,-27,60,-8,14,-99,96,16,-97,36,68,85,-93,-87,76,-47,34,11,62,-38,1,51,65,-59,-89,11,1,33,24,-53,64,86,-4,1,-44,86,-22,-48,-21,-20,87,-52,-35,71,-63,-58,-76,47,29,62,-91,-93,13,73,-52,0,-39,25,-66,61,48,74,48,-79,-25,-96,-93,52,-68,-38,-67,-81,-14,-26,89,22,-8,-87,-31,-79,74,-45,-95,-36,-72,-71,64,-34,53,74,-73,-22,25,51,-25,99,31,-19,28,62,19,37,81,-94,-88,70,4,3,83,50,1,34,-95,-18,75,-91,10,39,-26,-60,-10,1,17,-85,-48,91,90,83,-51,18,45,44,-44,3,49,-56,-26,-46,46,-66,-96,-76,67,-92,5,42,-84,-85,-42,-10,-46,24,67,47,38,-81,15,28,78,40,-76,1,-15,-21,-96,-66,22,-23,-36,-55,10,-33,-54,-45,-49,50,73,-33,42,-91,33,95,32,-23,20,-52,-5,-65,52,-49,52,75,51,-63,-69,54,-30,29,-91,34,51,-5,77,96,26,-71,46,-23,-28,-12,-15,81,-39,93,-42,57,-82,29,68,47,79,20,-1,7,56,30,-61,-96,-64,-53,14,86,18,-9,82,-55,-4,29,21,44,93,82,2,-69,52,36,87,70,-34,56,17,-78,-24,92,6,-67,22,44,-87,35,90,26,21,-15,93,4,29,-10,-90,-73,-89,79,85,13,-89,38,-51,74,-15,-9,30,78,-10,83,70,95,92,-30,39,-95,-95,6,30,2,90,0,-94,-3,66,91,23,77,48,-14,-33,35,-76,-8,9,-15,83,-83,-37,-27,76,-90,-32,68,-21,-93,49,-40,-11,-44,62,-21,55,44,52,22,13,-24,-24,-39,61,42,72,61,-66,-42,-54,-83,-26,-15,-34,-73,-29,10,94,27,-7,20,86,81,75,48,-62,8,
-30,89,-70,82,-58,5,-80,-97,-76,91,40,-43,-51,62,-49,0,-53,16,26,-5,-73,-2,-78,19,-82,-92,-22,70,33,15,-22,-97,4,-16,61,46,65,80,25,88,48,-34,-55,96,-95,-5,-27,-71,88,99,23,91,-26,44,10,-32,28,64,-62,-39,-21,-8,-60,83,75,77,6,40,57,-69,28,-18,-27,50,-21,-22,-78,28,6,-90,4,-71,-99,77,49,-12,-54,-23,-48,-40,15,8,29,31,-32,-19,9,73,-78,-57,80,26,25,-46,-24,80,8,-25,8,90,-16,-87,95,-38,66,44,26,88,-79,54,-51,12,-38,54,-56,29,-65,52,-21,-44,71,-40,59,-4,-10,-88,-47,97,-14,61,87,47,50,82,85,16,3,-12,5,0,-58,30,-87,-19,-16,-44,86,18,84,-34,51,32,2,-13,-71,91,-2,-19,65,61,-81,52,8,45,11,-30,-38,90,57,43,-10,98,-50,2,-44,33,34,-57,-72,-5,-15,55,-72,86,-58,-67,77,17,-10,42,-45,-14,-29,39,-69,58,-91,-31,48,65,-88,-85,40,-39,-6,96,70,-95,-84,75,0,0,7,4,-37,26,13,-60,-57,-97,58,-3,-12,-94,-64,-4,40,-79,64,-35,85,53,-21,2,90,72,-25,38,77,-10,13,-46,66,96,34,-94,22,-53,-55,41,-51,79,-85,14,61,-73,-90,1,-53,50,65,-91,3,-78,11,-6,70,85,-68,47,-47,21,77,95,17,11,-98,-83,57,-77,57,83,79,72,-26,40,98,-40,-81,-54,-90,60,-46,13,58,41,83,29,4,91,-47,56,12,-69,28,-94,18,6,-78,-24,29,56,-64,-15,28,9,-98,3,45,-80,25,54,57,79,-56,15,-3,-73,-56,-99,-82,-3,33,6,27,-38,12,-78,44,10,-26,-27,-34,9,34,93,94,36,-26,39,55,98,-29,12,54,14,-95,-48,41,-52,-48,35,21,62,-58,-75,-99,30,-53,44,-83,20,94,-17,-70,28,-47,-99,-36,26,17,96,25,87,-15,-21,1,-11,7,-81,37,59,54,-42,-2,72,-17,-2,-21,6,-58,-5,-74,-64,77,-68,64,-92,-67,-72,33,49,-76,-65,36,-14,-9,-86,74,97,-67,-12,33,63,23,-69,12,-94,28,90,11,70,-38,13,82,-60,45,46,-76,77,51,56,3,51,68,-61,-63,-64,-48,-88,-67,-39,-24,43,-76,98,73,35,80,-21,2,-32,-74,64,81,-92,80,26,54,-96,-20,-18,36,82,-67,-19,-79,-53,16,-51,-65,49,-13,10,-8,-13,9,-58,99,-11,20,1,57,45,-35,15,30,-78,-59,-39,-75,20,42,38,2,51,-81,-1,97,35,24,-91,-39,87,19,29,-25,-95,70,-26,-30,-32,74,-96,89,-83,18,19,-62,58,79,-60,-45,-2,77,33,-50,72,-68,-76,7,33,-67,44,20,51,-27,94,32,-56,-55,-98,-12,-80,-94,-23,-87,23,73,-73,-41,29,-34,13,-72,-80,-76,-23,69,-45,0,-47,64,32,97,-38,-40,-30,-44,91,-10,1,93,54,-3,75,-91,-14,-2,81,12,33,10,55,-76,37,51,-53,90,20,-22,-32,73,-57,99,47,4,-63,93,36,-95,-16,-86,97,-62,-13,49,-54,72,-75,-96,-15,57,-9,-83,80,-72,67,-73,95,-35,-18,-37,-85,1,-61,61,81,-25,54,-6,-20,-85,8,-46,-70,71,-96,-48,21,-72,-44,82,61,46,98,42,50,42,45,45,-93,-73,84,97,4,-76,58,-15,-2,-10,79,54,-19,-36,-91,10,-65,88,62,55,-84,94,37,53,40,12,-5,-33,-45,-82,-87,-62,-79,96,-64,25,96,70,-90,-5,-40,65,-74,18,-71,-66,-72,-59,98,66,72,90,60,-13,44,-23,98,-61,44,-70,-67,33,-32,-46,6,3,55,3,-27,64,74,10,7,76,-72,35,-89,-68,52,85,98,25,76,35,-88,20,89,-90,58,10,-83,68,-57,-16,-78,25,86,76,-95,-41,17,55,45,-76,32,72,36,19,-95,-12,4,79,-87,-20,14,-99,99,-20,87,35,89,-96,3,-91,-13,1,34,-50,-46,15,-15,70,-29,-69,71,79,-97,-16,-2,83,71,78,62,61,-41,-46,61,-65,33,48,69,0,28,-51,8,-85,26,-81,-36,79,34,49,27,81,56,-25,60,58,58,-42,19,29,-87,-19,-33,-29,11,-71,6,-78,53,-48,21,-18,-22,-71,-27,-96,-75,14,60,58,-60,-13,39,95,38,-24,30,-4,33,-51,-98,22,7,-31,93,-82,-26,-24,-61,-96,4,36,-38,58,42,35,39,66,-51,-1,24,-35,62,-60,-40,0,15,89,95,-75,-61,73,46,45,-58,16,39,15,91,78,19,-27,-9,80,-69,-67,-8,69,98,17,-32,-1,81,-93,15,-59,-17,-70,-92,-45,-69,-77,-71,54,67,-30,-30,6,62,-61,-39,-42,11,52,-85,41,61,7,11,60,-99,55,-64,82,39,51,99,-78,57,83,-23,88,-94,-18,42,72,51,88,-44,-10,-73,-7,47,37,-78,62,-44,83,-54,43,20,-54,-25,55,-95,-86,-17,81,-87,-59,64,-34,-94,70,47,47,19,-25,35,51,65,39,-78,12,-24,-80,-49,8,79,-27,-49,-1,-4,26,-68,-23,16,-9,-42,28,31,99,70,36,69,17,-40,-35,92,72,93,57,11,-85,-54,63,-89,-26,-28,90,46,-1,-11,-58,-98,-3,19,18,88,
53,-77,-4,52,93,-67,-2,10,-31,40,-21,-59,33,-64,28,24,-41,-8,34,32,40,24,-22,-84,90,-3,-82,-36,92,12,-48,45,11,25,-25,4,57,49,91,-74,-11,-53,-56,98,-17,71,-1,41,-60,-89,-27,56,11,27,72,1,-76,66,42,92,-22,70,38,88,-5,89,-30,-71,38,-62,-68,-95,-16,-25,79,43,-77,-21,-16,-37,-34,33,-81,76,-62,67,-45,-62,33,96,-69,87,66,45,76,-61,11,-77,-33,-51,-40,-2,-70,20,72,86,-36,72,-35,24,-65,30,-42,-70,-17,-28,74,37,9,7,10,16,-5,-23,-39,-52,-8,71,-53,58,97,-17,-43,-96,-97,-94,89,-34,77,-69,90,88,37,-75,-81,-79,-4,69,34,81,-24,-55,-2,47,-2,35,71,89,-16,94,47,-19,-23,-96,-39,-20,86,-49,45,63,-42,35,29,95,36,24,92,-68,-7,26,90,45,70,-12,-30,-32,99,-59,-43,-17,-87,81,40,-11,84,-98,68,47,-71,-9,-12,-14,-97,-83,-42,39,40,-50,47,-67,53,37,77,23,-98,-53,68,-98,-35,-75,-39,-23,-94,-98,65,67,79,11,-9,84,78,78,-53,80,94,-18,-4,34,31,-56,66,-16,80,21,84,59,67,52,37,-68,53,97,-15,36,75,-72,-20,31,38,70,15,16,49,39,-27,20,-79,69,-45,51,-87,97,-87,69,18,96,28,-37,25,-35,-29,-22,-60,56,-86,-85,60,-30,46,-2,-59,-62,90,66,76,-37,-14,96,-68,17,25,-79,15,14,90,-90,-13,-5,-51,-88,37,-3,-34,53,-47,-43,67,89,26,-10,-13,66,28,-23,9,-19,16,95,-22,25,12,79,45,-96,-7,12,90,79,84,-61,67,-2,35,-67,-49,64,-11,94,53,-84,-38,-83,58,89,-30,-32,-53,86,-60,1,-12,51,-20,-90,32,49,22,22,28,-17,37,-5,57,-50,5,-92,-86,93,78,44,-91,-60,37,67,-94,-92,-88,-47,70,-49,53,-42,78,-89,-33,10,59,65,-91,-35,-75,46,36,81,95,-59,65,85,-65,44,6,-80,83,-56,63,89,-72,74,-81,97,-75,48,-68,80,58,-25,-10,-5,-59,-1,36,-58,-78,-28,-76,93,-10,65,55,1,9,-38,20,-30,-18,-39,-64,9,-65,-46,-17,-63,-98,14,-83,37,88,-17,-91,-94,58,44,-52,79,92,-52,49,-18,-87,5,82,-1,43,-20,68,-98,40,-96,-13,74,-66,69,87,-88,-40,80,25,-52,-36,33,-69,-78,-23,-22,78,-30,2,27,51,14,-91,11,89,28,90,57,29,7,37,-84,-19,47,61,-54,36,-79,-74,-39,-54,-34,94,-24,-35,-29,30,-57,17,-68,46,-54,22,32,56,12,-40,23,-54,66,-70,60,58,-13,-16,20,9,-80,17,35,-19,62,77,-25,-85,-58,22,21,-16,-83,52,-92,-38,-48,39,94,40,98,17,62,41,23,-1,0,86,83,-80,-4,-97,13,-92,-17,-47,84,34,-33,-73,-66,88,-13,-73,17,-6,-12,45,-90,-41,-15,85,52,-75,26,-25,23,-74,-39,6,-78,33,9,35,40,68,64,2,-20,-69,28,-10,95,91,16,-10,-37,-96,-65,-28,-38,-3,-66,-86,21,60,-35,-56,62,2,50,84,-64,-64,-4,52,80,-40,-46,-40,-33,58,49,-60,50,-35,29,-87,-55,63,-39,-17,-40,-28,-27,57,-68,14,-98,93,-83,28,54,-71,63,-73,57,20,-37,-88,80,7,69,6,46,-4,48,-25,-15,92,-85,46,-47,-25,-82,2,-91,-74,-83,-90,95,-90,37,27,-85,-23,53,71,97,93,82,54,0,52,-40,-54,-52,-92,20,33,77,11,-44,-93,62,-50,8,71,-48,1,80,-53,10,-5,73,24,48,4,-4,22,-3,-22,-24,96,-93,13,-81,-68,-3,15,41,-49,-74,73,-43,88,99,42,59,-49,-57,16,-26,53,87,-52,-46,36,28,49,-65,-98,-95,-12,74,87,-99,92,95,-26,7,13,25,9,-14,58,-3,-15,0,-67,12,20,26,86,-27,13,-89,3,-74,38,-70,-39,39,-66,48,-10,97,25,-18,93,98,65,6,0,-49,69,-41,25,-69,35,57,43,-45,-40,6,4,73,16,-92,98,-46,-63,-64,69,-53,60,-64,-56,-15,-6,-86,-39,-64,-3,60,-14,-34,-81,-89,-27,54,45,-84,85,-95,21,-10,54,-86,-3,30,-56,10,65,89,56,26,-75,99,87,18,-86,-52,53,10,-91,-60,52,-96,-73,-75,34,71,-82,20,53,15,-90,7,29,-17,-63,72,92,-97,62,48,5,86,24,-8,-18,37,40,-65,-76,48,-26,52,28,-22,77,-37,-51,71,82,-98,-14,68,9,-85,-49,22,64,-57,24,3,67,-94,-11,-9,-2,70,-94,-62,82,-94,62,-44,58,-33,10,12,29,59,-17,11,37,45,-44,-54,37,6,45,1,25,-31,-96,-8,-25,-31,83,49,-83,88,63,98,93,2,-69,28,68,41,-60,-2,0,-1,85,-63,-55,-58,-40,81,47,-95,-41,-50,-50,-38,41,1,7,24,-50,23,-11,-87,21,-40,14,52,87,-40,-7,-72,57,69,3,42,82,47,60,-81,5,-15,99,63,34,-73,-98,-25,27,9,76,54,-68,-35,43,30,1,-65,-41,-11,-6,28,-7,-72,-26,95,69,-44,20,7,-48,-75,91,-72,88,25,-46,66,76,58,-2
5,29,12,-16,71,-68,90,-28,-57,-51,60,36,76,-70,63,-73,-74,10,-18,22,-83,10,46,84,38,11,-14,91,-22,-38,49,29,68,-62,89,39,-53,79,-89,-11,-95,48,-97,57,77,42,83,79,-48,-34,1,-55,52,-52,-94,-10,35,-9,-41,-87,30,84,42,-2,99,31,-63,-54,-12,-76,11,-8,71,13,-50,26,-44,-67,5,-16,-25,-93,29,-72,-69,34,17,65,-97,75,55,32,60,-26,-70,59,81,-57,81,-31,-34,-8,61,-85,82,87,40,-62,-80,-55,21,-28,-72,-73,-1,-65,38,92,-99,40,45,55,-51,-18,-94,-45,-82,86,-3,98,-44,39,-33,93,53,-51,57,-7,-37,53,15,61,-75,42,64,0,77,2,-7,54,-81,-85,-14,-56,-4,-9,-2,-10,-23,71,-12,9,11,31,-21,-59,57,-64,11,-80,89,-74,80,-86,44,22,-9,-2,-76,83,-48,-80,74,14,-37,-30,-95,38,59,-41,9,-76,44,96,31,-76,-85,-12,-64,25,-15,-75,50,-58,-84,-6,63,6,92,64,-34,-79,83,-60,35,-77,9,16,37,-55,51,-76,-55,-27,96,75,-27,-89,-59,8,35,25,33,-38,66,25,33,30,-69,2,-6,-27,22,-46,12,-66,-47,97,27,90,-81,-45,-86,-37,27,-90,-62,99,97,-22,-15,32,-97,-82,71,46,19,4,-47,26,-94,46,98,4,76,10,38,-71,-16,-58,95,2,-4,-91,64,-99,95,78,76,92,-66,-39,1,-87,-45,-28,58,73,75,-89,-1,57,33,-26,-38,9,83,76,15,-56,-82,10,45,-10,-4,9,-33,-9,-35,-56,-40,-2,4,37,10,59,-90,-31,9,61,-21,-91,19,89,-18,80,-2,-57,56,89,-14,50,0,-91,-83,95,-6,-16,-37,58,27,-1,-44,-92,-64,66,43,-55,-88,-47,-93,-33,-39,2,-44,19,-18,31,38,-85,20,-98,41,-80,-90,57,91,3,-82,-46,-38,21,29,-6,29,65,-63,-28,-90,-52,24,92,15,61,-6,47,-42,52,-22,95,43,98,96,-39,94,82,-81,86,-14,-87,-83,24,11,46,-82,-60,-12,-45,-12,-26,-21,89,-33,93,27,37,41,84,-33,95,57,86,70,30,-52,65,13,-57,28,98,-45,44,-1,65,-33,-7,81,54,24,-53,-71,-96,-64,-28,73,39,85,-9,24,-71,-13,-42,-84,-43,87,-37,98,0,5,26,-25,59,-52,50,2,-9,-56,-40,-54,67,6,-49,47,18,22,21,-42,-16,88,58,13,-48,-84,28,9,79,90,-16,-20,-28,-89,31,-92,-42,-18,9,25,25,69,70,-31,-48,97,93,70,-4,-9,-95,80,-21,39,-30,31,31,97,-83,10,64,0,89,-64,-13,21,-57,21,79,29,-53,4,-25,93,49,26,91,42,-27,-13,-67,-46,-56,12,92,13,-80,23,-13,-64,-66,-49,12,99,-14,99,-3,6,-3,-24,-65,43,79,9,37,29,-65,5,-52,-15,91,-19,38,-87,69,31,25,88,-69,-87,-99,-36,-37,-11,-36,26,-35,60,-68,62,-63,-57,5,92,51,-81,-2,62,23,46,-53,-8,-96,85,4,72,-7,-71,37,23,-82,14,64,-42,-97,-95,60,-55,64,68,-93,77,-89,-12,47,61,-16,-78,23,-93,67,47,-25,47,9,78,-4,78,84,-90,-22,78,23,-58,-64,3,-54,95,47,87,63,30,41,50,94,-12,11,-22,10,-88,-38,-46,35,13,78,-56,-8,-26,98,-47,-16,-24,-69,83,18,-56,-37,40,-61,-90,27,79,16,-54,6,-12,33,94,65,-80,82,-96,-49,17,17,-71,37,85,-21,35,-62,39,87,-55,0,5,-12,62,-77,4,72,49,-40,-35,71,65,52,-18,59,-5,1,41,-2,28,-65,91,33,71,53,89,-17,90,28,-29,-87,-72,75,0,90,74,-96,39,24,-60,80,-28,-94,33,53,41,-95,-68,-17,-21,59,17,-29,-30,-34,-76,-41,48,91,63,-81,-96,68,71,-20,35,45,-39,-26,-54,0,-69,18,-18,40,-29,-76,44,2,-17,-76,-61,99,-29,8,-58,-29,43,90,-38,6,85,-58,-49,56,22,85,-21,59,-64,24,-41,-33,-81,41,-93,-33,64,28,45,-76,51,83,-77,-78,-32,-58,-8,87,-68,31,-29,-83,49,21,50,-52,7,-71,-93,19,29,-34,85,25,83,69,91,47,-3,36,47,-75,-3,69,-54,41,87,14,6,-81,-78,76,-87,71,-26,62,-81,57,67,-74,-23,-27,-32,-61,97,-49,-92,65,74,-19,2,21,5,75,-33,27,-7,-45,-81,98,-50,-60,51,-38,87,-74,-99,83,59,44,85,-64,-82,-47,-48,91,-20,-64,57,-46,17,-64,74,-78,87,-82,26,-20,-28,44,-44,22,83,-93,60,48,-91,61,31,68,5,16,80,-1,45,-68,-9,-75,-55,-75,-45,61,-40,-94,59,-53,-77,-15,3,-28,-71,58,93,89,42,53,37,27,-9,-55,-5,73,37,-47,-28,-41,-39,39,-17,5,40,14,-34,76,19,-97,99,41,-13,3,-87,-7,-62,82,-18,56,13,95,-16,-96,-83,55,53,53,-92,-97,88,-54,18,-52,50,-41,61,93,-65,-20,95,-65,-79,-41,14,-89,51,51,92,-90,8,-18,81,68,-14,97,23,-84,27,8,-82,15,53,36,62,3,94,0,-27,-94,79,-32,-60,77,4,53,87,-45,-18,56,-58,66,-61,-77,
34,24,97,-66,16,24,41,34,-83,-6,-30,55,74,-59,-44,-53,-54,-88,-8,-14,88,95,-84,75,-73,-26,32,-32,-60,70,-32,50,-29,-35,84,86,65,25,20,58,96,66,-9,70,-93,23,93,-70,34,-15,-8,23,-20,7,-2,-16,58,-93,51,97,76,95,48,-53,-63,9,-89,-97,-66,7,37,-93,-26,-72,76,-43,50,47,-14,-15,-68,77,84,-11,-38,-18,72,-80,-11,0,-83,-58,-4,41,-34,32,-50,-24,11,-39,82,-51,-33,-67,-47,-79,-10,80,67,75,-58,75,30,-74,-36,91,-16,-87,-89,49,13,-96,91,-91,45,-43,17,71,9,-71,-91,-32,-46,-47,1,6,-50,67,-14,93,42,4,68,-28,29,-90,-60,-86,-78,-50,62,11,-46,30,19,75,86,-86,23,-28,-58,32,40,-28,84,-82,77,-89,84,-59,-96,26,44,48,75,-49,57,-85,-59,56,-58,-97,-33,-28,33,63,-53,-4,76,46,45,94,-22,-15,-34,-61,2,-80,-51,-14,-63,-71,88,81,77,40,-91,11,32,-51,-33,73,-72,34,-55,37,-26,-32,-89,-73,-86,55,-79,-31,-60,-37,7,18,-41,33,80,-4,61,68,-46,15,9,-38,-73,-82,10,-7,90,14,-96,-88,-48,-23,-44,-38,-20,-31,-83,0,14,-67,39,-78,50,98,31,30,-6,-31,-25,47,84,60,85,-89,77,-28,-20,44,85,-16,55,-63,37,-89,-25,-82,-43,-32,-6,47,0,-66,45,-73,-91,-24,33,-21,45,85,-74,6,45,87,16,0,-41,95,-56,-56,-44,76,-42,70,63,9,87,-80,77,-42,67,-46,-32,12,-19,-24,65,90,54,-13,75,56,92,21,43,-15,-79,78,-43,-59,99,13,-83,-43,-17,80,42,-53,76,19,-19,20,50,49,-90,-69,-98,74,97,33,-62,73,-34,-70,-29,86,14,-32,64,70,8,63,-40,2,96,19,-18,39,42,-42,-65,-76,55,84,72,64,91,50,15,89,-40,52,39,26,58,-90,12,-28,-23,-24,-81,61,-83,-44,-37,-87,74,44,28,17,79,63,40,11,24,-11,-25,16,-84,66,-18,-24,18,-79,-98,-24,-70,-10,24,-17,42,19,-79,58,74,-16,48,49,-95,-47,-57,60,-84,82,-29,17,-52,22,-67,-37,-12,-9,15,-94,11,16,-42,17,82,81,76,25,1,96,-17,-25,-20,-92,-99,-38,-39,-57,-78,-47,-98,-30,69,-51,91,78,-11,-20,-31,-96,-15,56,-3,20,-50,-21,1,-74,-96,78,-77,-36,-69,-21,-52,-92,17,8,-49,39,38,-71,85,-16,-22,76,-37,43,-44,8,23,18,-58,-80,-62,-9,-1,15,17,-21,-29,16,19,1,71,67,-15,-11,-48,12,-95,89,41,89,-26,95,65,13,38,98,-79,-39,16,-38,-42,53,53,33,-32,-53,-88,38,39,31,-84,10,-25,1,75,26,-87,56,16,30,45,66,25,-12,-21,-60,-14,-24,-22,-98,38,35,31,-32,-32,75,-9,-44,-9,-70,63,6,16,-62,83,-31,64,73,-75,-43,3,70,-77,-94,-65,1,-55,20,76,22,-2,-9,-43,-71,35,1,-96,-74,57,-6,-68,-3,77,25,35,60,93,-24,10,-82,32,90,64,31,95,98,-91,-83,-5,61,39,92,-47,72,-79,64,-26,-75,67,-92,-5,98,-19,-28,0,-84,-90,-7,-9,-4,-12,99,-14,-48,30,-42,-72,38,-25,22,0,-86,15,29,62,35,-7,-87,36,-40,96,-68,-64,-22,-20,35,-7,-34,-94,61,62,-7,37,-52,44,-32,-17,71,82,57,-6,-41,47,-91,87,9,20,-42,22,-66,-6,-5,-58,29,72,21,-58,-57,87,-53,-96,26,39,40,73,-17,84,-44,-68,67,89,2,-97,36,-13,90,-77,-93,-52,21,-83,18,-84,-42,-52,88,56,-11,-69,20,35,-89,-54,-49,27,95,33,-11,-71,41,-67,-82,-57,-65,31,29,-75,53,13,-51,-26,6,67,-34,64,91,-69,-3,-43,-62,-83,68,-52,-61,-81,52,-66,28,-59,-38,-30,-27,-43,-11,7,87,95,9,40,8,34,90,90,-22,-67,54,-31,-36,-72,25,1,-79,-7,26,-41,11,-22,-30,-83,-5,31,-14,67,87,74,-49,51,46,36,-9,-46,-29,-42,-78,48,-9,75,94,54,80,-4,-45,-99,89,57,-63,-23,-65,6,-7,29,-86,55,-27,78,7,1,29,-47,-63,97,-16,-16,-45,5,-67,45,-43,-96,-24,-86,-1,8,-85,-35,-35,-72,42,0,33,-88,-94,23,44,-44,1,-72,56,-92,-43,92,-95,40,-23,-41,-78,-14,81,-44,-34,-43,-31,64,41,-40,-70,-93,-13,48,-17,-80,36,-12,20,-20,43,-79,7,-24,-95,64,-31,-91,-19,22,-55,-20,84,-97,35,-50,-64,-96,90,77,-36,-80,83,50,44,-34,47,-19,30,-33,60,-49,-36,-55,26,-54,85,71,-46,-57,-30,-25,22,-46,-23,-66,-20,-10,-62,-52,-33,-22,-33,-73,-95,-88,-31,28,-31,75,71,-71,-74,-64,-50,29,-19,-88,0,11,-68,70,63,53,-99,-83,64,-42,6,78,5,49,-44,48,75,-63,36,-79,65,-95,96,36,-89,98,48,-63,27,-94,-74,-95,-6,-43,51,-43,86,51,-27,50,85,56,28,-33,5,60,92,-42,97,28,-22,39,10,-26,-48,20,-27
,-99,33,76,82,58,80,-24,-85,31,32,-98,-40,82,28,-78,38,-66,-11,-57,93,-19,0,67,85,78,-17,-5,-71,35,91,-22,12,2,-45,-6,-40,-88,47,-48,43,-21,-70,-97,-39,57,-99,98,90,-11,-81,61,46,18,28,9,-27,-88,80,2,23,49,56,-65,-49,-89,5,10,-77,-48,61,42,8,90,21,45,25,21,44,15,87,62,-47,33,-43,81,-81,30,69,99,-91,-31,48,64,-20,98,75,84,-15,74,-86,-76,-7,21,90,-86,-34,15,34,86,-92,-2,25,-40,-68,82,-82,-73,-11,63,-74,-3,-68,50,-39,11,25,-87,-27,-89,63,-37,-89,-44,83,-99,-54,-74,-7,80,89,0,-22,14,36,-14,-27,31,13,61,-29,15,-65,3,-34,-27,90,90,-38,-60,77,-97,2,87,-42,-38,65,4,-12,-42,-39,-23,34,15,67,-30,78,-82,77,-32,55,-51,59,90,28,-98,-60,-5,69,1,35,46,-20,-63,11,38,-1,-24,-81,63,33,-44,-83,-33,47,83,13,2,77,-32,69,33,16,6,0,-79,7,39,15,-24,-83,-50,99,-3,-13,10,-88,62,62,-93,25,-5,-61,41,38,62,1,-72,-35,-21,-5,-89,88,-89,16,-12,-92,-76,-96,23,-24,-80,72,-25,-7,-64,61,4,97,23,-13,-1,-82,25,-83,32,-12,18,60,29,73,-45,39,61,-57,-67,-74,-73,-67,-71,49,8,-52,22,83,18,34,21,-78,9,-55,-14,-92,-61,-89,-98,71,-25,-4,8,-96,-54,39,20,83,58,52,-91,85,84,-63,34,-31,-39,-67,-48,78,44,50,77,-47,94,-37,-63,-67,-50,-62,80,2,10,-12,5,55,-95,-98,38,62,30,46,47,14,59,-41,83,-79,-32,-88,75,-88,61,29,-36,55,91,-22,65,-81,-8,45,20,-97,-89,-98,57,-85,-96,-27,76,33,-81,1,-75,55,36,-92,52,-96,95,27,-84,34,-43,-44,-34,-75,33,-69,-57,-74,53,62,-95,40,64,38,54,44,-89,7,-23,-93,84,1,61,20,-15,13,24,-42,-83,39,91,-27,94,-43,-2,5,-35,17,7,-82,-20,11,57,-79,-74,-12,64,-63,71,-59,-57,56,19,80,-24,-96,-7,76,-39,9,-8,-71,59,-36,-14,-66,68,50,50,-25,44,-70,62,-22,-50,64,65,-86,-99,13,-68,-80,-31,50,76,22,30,-31,-2,68,55,90,96,-9,-69,-41,24,98,85,-49,-50,6,-42,88,83,-16,52,25,-25,30,-61,6,49,-16,-67,26,-94,39,71,-19,7,3,-52,-19,93,-22,39,-6,-47,2,45,2,-92,-97,-10,91,-14,-81,-7,60,48,8,-57,-25,92,-25,77,97,-85,25,-45,-2,-71,2,78,98,56,-5,-30,-91,73,-85,10,80,93,76,48,-44,72,-58,-83,20,49,-64,94,18,11,48,16,2,-26,47,99,-21,-50,55,-23,-94,-73,46,-85]
print(maxSubArray(nums))
|
py | 1a4854c7b1d6069c3f9b84d885935f98de2ef587 | # This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Helper method to create a API generator based on the current configuration
in the environment valriables.
"""
from contextlib import contextmanager
from typing import Dict, Optional
from flowserv.config import Config
from flowserv.service.api import API, APIFactory
from flowserv.service.local import LocalAPIFactory
import flowserv.config as config
# -- API factory pattern for client applications ------------------------------
def ClientAPI(
env: Optional[Dict] = None, basedir: Optional[str] = None,
database: Optional[str] = None, open_access: Optional[bool] = None,
run_async: Optional[bool] = None, user_id: Optional[str] = None
) -> APIFactory:
"""Create an instance of the API factory that is responsible for generating
API instances for a flowserv client.
The main distinction here is whether a connection is made to a local instance
of the service or to a remote instance. This distinction is made based on
the value of the FLOWSERV_CLIENT environment variable that takes the values
'local' or 'remote'. The default is 'local'.
Provides the option to alter the default settings of environment variables.
Parameters
----------
env: dict, default=None
Dictionary with configuration parameter values.
basedir: string, default=None
Base directory for all workflow files. If no directory is given or
specified in the environment a temporary directory will be created.
database: string, default=None
Optional database connect url.
open_access: bool, default=None
Use an open access policy if set to True.
    run_async: bool, default=None
Run workflows in asynchronous mode.
user_id: string, default=None
Optional identifier for the authenticated API user.
Returns
-------
flowserv.service.api.APIFactory
"""
# Get the base configuration settings from the environment if not given.
env = env if env is not None else config.env()
if not isinstance(env, Config):
env = Config(env)
# Update configuration based on the given optional arguments.
if basedir is not None:
env.basedir(basedir)
if database is not None:
env.database(database)
if open_access is not None and open_access:
env.open_access()
# By default, the client runs all workflows synchronously.
if run_async is not None and run_async:
env.run_async()
elif env.get(config.FLOWSERV_ASYNC) is None:
env.run_sync()
# Create local or remote API factory depending on the FLOWSERV_CLIENT value.
client = env.get(config.FLOWSERV_CLIENT, config.LOCAL_CLIENT)
if client == config.LOCAL_CLIENT:
return LocalAPIFactory(env=env, user_id=user_id)
elif client == config.REMOTE_CLIENT:
# Not implemented yet.
pass
raise ValueError("inalid client type '{}'".format(client))
@contextmanager
def service() -> API:
"""Context manager that returns a service API that was instantiated from the
current configuration settings in the environment.
Returns
-------
flowserv.service.api.API
"""
# Create the API factory from the current environment settings.
factory = ClientAPI()
with factory() as api:
yield api
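# Example usage (illustrative sketch, not part of the original module). It
# assumes a valid flowserv configuration is present in the environment; the
# print call only demonstrates that `service` yields a ready-to-use API object.
if __name__ == '__main__':
    with service() as api:
        print(api)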
|
py | 1a4854c8b590c6ea8078a2fd7c10b40b5c1992ac | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import LUISAuthoringClientConfiguration
from msrest.exceptions import HttpOperationError
from .operations import FeaturesOperations
from .operations import ExamplesOperations
from .operations import ModelOperations
from .operations import AppsOperations
from .operations import VersionsOperations
from .operations import TrainOperations
from .operations import PermissionsOperations
from .operations import PatternOperations
from .operations import SettingsOperations
from .operations import AzureAccountsOperations
from . import models
class LUISAuthoringClient(SDKClient):
"""LUISAuthoringClient
:ivar config: Configuration for client.
:vartype config: LUISAuthoringClientConfiguration
:ivar features: Features operations
:vartype features: azure.cognitiveservices.language.luis.authoring.operations.FeaturesOperations
:ivar examples: Examples operations
:vartype examples: azure.cognitiveservices.language.luis.authoring.operations.ExamplesOperations
:ivar model: Model operations
:vartype model: azure.cognitiveservices.language.luis.authoring.operations.ModelOperations
:ivar apps: Apps operations
:vartype apps: azure.cognitiveservices.language.luis.authoring.operations.AppsOperations
:ivar versions: Versions operations
:vartype versions: azure.cognitiveservices.language.luis.authoring.operations.VersionsOperations
:ivar train: Train operations
:vartype train: azure.cognitiveservices.language.luis.authoring.operations.TrainOperations
:ivar permissions: Permissions operations
:vartype permissions: azure.cognitiveservices.language.luis.authoring.operations.PermissionsOperations
:ivar pattern: Pattern operations
:vartype pattern: azure.cognitiveservices.language.luis.authoring.operations.PatternOperations
:ivar settings: Settings operations
:vartype settings: azure.cognitiveservices.language.luis.authoring.operations.SettingsOperations
:ivar azure_accounts: AzureAccounts operations
:vartype azure_accounts: azure.cognitiveservices.language.luis.authoring.operations.AzureAccountsOperations
:param endpoint: Supported Cognitive Services endpoints (protocol and
hostname, for example: https://westus.api.cognitive.microsoft.com).
:type endpoint: str
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
"""
def __init__(
self, endpoint, credentials):
self.config = LUISAuthoringClientConfiguration(endpoint, credentials)
super(LUISAuthoringClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '3.0-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.features = FeaturesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.examples = ExamplesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.model = ModelOperations(
self._client, self.config, self._serialize, self._deserialize)
self.apps = AppsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.versions = VersionsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.train = TrainOperations(
self._client, self.config, self._serialize, self._deserialize)
self.permissions = PermissionsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.pattern = PatternOperations(
self._client, self.config, self._serialize, self._deserialize)
self.settings = SettingsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.azure_accounts = AzureAccountsOperations(
self._client, self.config, self._serialize, self._deserialize)
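# Example construction (illustrative sketch, not part of the generated client).
# `CognitiveServicesCredentials` is the usual msrest wrapper for a Cognitive
# Services subscription key; the endpoint and key below are placeholders.
if __name__ == '__main__':
    from msrest.authentication import CognitiveServicesCredentials
    luis_client = LUISAuthoringClient(
        endpoint='https://westus.api.cognitive.microsoft.com',
        credentials=CognitiveServicesCredentials('<subscription-key>'))
    print(luis_client.api_version)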
|
py | 1a48563133ff1a8201df1736f6ec96d5b9073440 | """
WSGI config for dsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dsite.settings")
application = get_wsgi_application()
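# Any WSGI server can serve the callable above; for example (sketch, assuming
# gunicorn is installed and the project root is on the Python path):
#     gunicorn dsite.wsgi:application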
|
py | 1a4857a6dbaf5fc44b9554c9644aea48e288dc89 | from zope.security.interfaces import Unauthorized
from zope.testbrowser.browser import LinkNotFoundError
import pytest
EVENT_VIEW_CONFIGURATION_ADD_TEXT = 'event view configuration'
def test_masterdata__Table__1(address_book, browser):
"""It allows to navigate to the event views list."""
browser.login('cal-visitor')
browser.open(browser.CALENDAR_MASTERDATA_URL)
browser.getLink('Event views').click()
assert browser.url == browser.CALENDAR_MASTERDATA_EVENTVIEW_URL
def test_masterdata__Table__2(address_book, browser):
"""It renders a message if there are no event view configurations yet."""
browser.login('cal-visitor')
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
assert 'No event views defined yet.' in browser.contents
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Table__3(address_book, browser, login):
"""It renders no add link for any calendar user."""
browser.login(login)
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
with pytest.raises(LinkNotFoundError):
browser.getLink(EVENT_VIEW_CONFIGURATION_ADD_TEXT)
def test_masterdata__Table__4(address_book, browser):
"""It prevents access for anonymous."""
browser.handleErrors = False # needed to catch exception
with pytest.raises(Unauthorized):
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
def test_masterdata__Add__1(address_book, browser):
"""It allows administrators to add a new category in the list."""
browser.login('mgr')
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
browser.getLink(EVENT_VIEW_CONFIGURATION_ADD_TEXT).click()
assert browser.CALENDAR_EVENTVIEW_CONFIGURATION_ADD_URL == browser.url
browser.getControl('title').value = 'default'
browser.getControl('Add').click()
assert '"default" added.' == browser.message
# The new configuration shows up in the list:
assert '>default<' in browser.contents
def test_masterdata__Add__2(
address_book, EventViewConfigurationFactory, browser):
"""It prevents adding a new config with an already existing title."""
EventViewConfigurationFactory(address_book, u'default')
browser.login('mgr')
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_ADD_URL)
browser.getControl('title').value = 'default'
browser.getControl('Add').click()
assert 'There were some errors.' in browser.contents
assert 'This title is already used for an ' in browser.contents
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Add__3(address_book, browser, login):
"""It is not accessible for any calendar user."""
browser.login(login)
browser.assert_forbidden(browser.CALENDAR_EVENTVIEW_CONFIGURATION_ADD_URL)
def test_masterdata__Edit__1(
address_book, EventViewConfigurationFactory, CategoryFactory, browser):
"""It allows to edit a category."""
EventViewConfigurationFactory(address_book, u'default')
CategoryFactory(address_book, u'foo')
CategoryFactory(address_book, u'bar')
browser.login('mgr')
browser.open(browser.CALENDAR_MASTERDATA_EVENTVIEW_URL)
browser.getLink('default').click()
assert browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL == browser.url
assert 'default' == browser.getControl('title').value
browser.getControl('title').value = 'alternative'
browser.getControl('start date').displayValue = ['3 days in past']
browser.getControl('duration').displayValue = ['3 weeks']
browser.getControl('categories').displayValue = ['bar']
browser.getControl('show fields').displayValue = ['persons']
browser.getControl('Save').click()
assert 'Data successfully updated.' == browser.message
# The changed category name shows up in the list:
assert 'alternative' in browser.contents
browser.getLink('alternative').click()
assert browser.getControl('title').value == 'alternative'
assert browser.getControl('start date').displayValue == ['3 days in past']
assert browser.getControl('duration').displayValue == ['3 weeks']
assert browser.getControl('categories').displayValue == ['bar']
assert browser.getControl('show fields').displayValue == ['persons']
def test_masterdata__Edit__2(
address_book, EventViewConfigurationFactory, browser):
"""It prevents changing a category title to an existing one."""
EventViewConfigurationFactory(address_book, u'default')
EventViewConfigurationFactory(address_book, u'alternative')
browser.login('mgr')
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL)
browser.getControl('title').value = 'alternative'
browser.getControl('Save').click()
assert 'There were some errors.' in browser.contents
assert 'This title is already used for an ' in browser.contents
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Edit__3(
address_book, EventViewConfigurationFactory, browser, login):
"""It allows calendar users only to see the event view configuration data.
But they cannot change or delete them.
"""
EventViewConfigurationFactory(address_book, u'foo')
browser.login(login)
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL)
# There are no fields and no delete button:
assert (['form.buttons.apply', 'form.buttons.cancel'] ==
browser.all_control_names)
def test_masterdata__Delete__1(
address_book, EventViewConfigurationFactory, browser):
"""It allows to delete an event view configuration."""
EventViewConfigurationFactory(address_book, u'default')
browser.login('mgr')
browser.open(browser.CALENDAR_EVENTVIEW_CONFIGURATION_EDIT_URL)
browser.getControl('Delete').click()
assert browser.CALENDAR_EVENTVIEW_CONFIGURATION_DELETE_URL == browser.url
assert ('Do you really want to delete this event view configuration?' in
browser.contents)
browser.getControl('Yes').click()
assert '"default" deleted.' == browser.message
@pytest.mark.parametrize('login', ('cal-visitor', 'cal-editor'))
def test_masterdata__Delete__2(
address_book, EventViewConfigurationFactory, browser, login):
"""It is not accessible for any calendar user."""
EventViewConfigurationFactory(address_book, u'foo')
browser.login(login)
browser.assert_forbidden(
browser.CALENDAR_EVENTVIEW_CONFIGURATION_DELETE_URL)
|
py | 1a4857fcd67a410e03843e75f848876c0363aea8 | from setuptools import setup, find_packages
PACKAGE_NAME = "lintreview"
VERSION = "0.14.0"
requirements = open('./requirements.txt', 'r')
setup(
name=PACKAGE_NAME,
version=VERSION,
description="""
Lint Review, an automated code review tool that integrates with github.
Integrates with the github API & a variety of code checking tools.
""",
author="Mark story",
author_email="[email protected]",
packages=find_packages(),
entry_points={
'console_scripts': [
'lintreview = lintreview.cli:main',
],
},
install_requires=requirements.readlines(),
)
|
py | 1a48581f4c594ef3e014425db5f5ef4fa1989cda | from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class DialaphoneSpider(BaseSpider):
name = 'dialaphone.co.uk'
allowed_domains = ['dialaphone.co.uk']
start_urls = ['http://www.dialaphone.co.uk/pay-as-you-go/']
def parse(self, response):
hxs = HtmlXPathSelector(response)
urls = hxs.select('//*[@id="ulManufacturerLinks"]/li/a/@href').extract()
for url in urls:
yield Request(url, callback=self.parse_categories)
def parse_categories(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//table[@class="List"]/tr')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'td[@class="DealIncludes"]/a[@class="PhoneName"]/text()')
loader.add_xpath('url', 'td[@class="DealIncludes"]/a[@class="PhoneName"]/@href')
price = 0.0
if product.select('td[@class="Price"]/text()'):
price = product.select('td[@class="Price"]/text()').extract()[0]
loader.add_value('price', price)
yield loader.load_item()
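# To run this spider locally (sketch, assuming the usual Scrapy project layout
# and that `product_spiders.items` is importable):
#     scrapy crawl dialaphone.co.uk
# which yields one Product item per row of the deals table parsed above.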
|
py | 1a4859246f71670193f9b2ff929c6c8f3f6df6a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.md') as history_file:
history = history_file.read()
install_requires = [
'torch<2,>=1.0',
'torchvision<1,>=0.4.2',
'scikit-learn<0.23,>=0.21',
'numpy<2,>=1.17.4',
'pandas<0.26,>=0.24',
]
setup_requires = [
'pytest-runner>=2.11.1',
]
tests_require = [
'pytest>=3.4.2',
'pytest-cov>=2.6.0',
]
development_requires = [
# general
'bumpversion>=0.5.3',
'pip>=9.0.1',
'watchdog>=0.8.3',
# docs
'm2r>=0.2.0',
'Sphinx>=1.7.1',
'sphinx_rtd_theme>=0.2.4',
'autodocsumm>=0.1.10',
# style check
'flake8>=3.7.7',
'isort>=4.3.4',
# fix style issues
'autoflake>=1.2',
'autopep8>=1.4.3',
# distribute on PyPI
'twine>=1.10.0',
'wheel>=0.30.0',
# Advanced testing
'coverage>=4.5.1',
'tox>=2.9.1',
]
setup(
author='MIT Data To AI Lab',
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description='Conditional GAN for Tabular Data',
entry_points={
'console_scripts': [
'ctgan=ctgan.__main__:main'
],
},
extras_require={
'test': tests_require,
'dev': development_requires + tests_require,
},
install_package_data=True,
install_requires=install_requires,
license='MIT license',
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
include_package_data=True,
keywords='ctgan CTGAN',
name='ctgan',
packages=find_packages(include=['ctgan', 'ctgan.*']),
python_requires='>=3.5',
setup_requires=setup_requires,
test_suite='tests',
tests_require=tests_require,
url='https://github.com/sbuttler/CTGAN',
version='0.2.2.dev0',
zip_safe=False,
)
|
py | 1a485a9fdb727bf529ba3ca19fc2e0569487953e | # This is a very trivial series of tests. If apply is subtly broken,
# we will have to find out some other way.
class AppTestApply:
def test_trivial_listonly(self):
def mymin(*args):
return min(list(args))
assert apply(mymin, [-1,-2,-3,-4]) == -4
def test_trivial_dictonly(self):
def mymin(*arr, **kwargs):
return min(list(arr) + kwargs.values())
assert apply(mymin,
[], {'null' : 0, 'one': 1, 'two' : 2}) == (
0)
def test_trivial(self):
def mymin(*arr, **kwargs):
return min(list(arr) + kwargs.values())
assert apply(mymin,
[-1,-2,-3,-4],
{'null' : 0, 'one': 1, 'two' : 2}) == (
(-4))
|
py | 1a485b0b754a5890f7c0b1d4e1805f52aea4cef5 | balance = 3926
annualInterestRate = 0.2
#import time
monthlyInterest = annualInterestRate / 12.0
workingBalance = balance
minimumPayment = 0
while workingBalance > 0:
minimumPayment += 10
workingBalance = balance
for month in range(1, 13):
workingBalance -= minimumPayment
workingBalance = workingBalance + (workingBalance * monthlyInterest)
# print month
# print minimumPayment
# print workingBalance
# time.sleep(0.01)
print "Lowest Payment: " + str(minimumPayment)
|
py | 1a4861826473bc3f856dc52486d9f53ec2c229b4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
This is a poor man's port of set_up_volume.sh to allow `image_package` to
emit btrfs loopbacks. In ~1 week's time, this will be replaced by a
better-tested, more robust, and more coherent framework for handling images
and loopbacks.
'''
import logging
import os
import subprocess
import sys
import tempfile
from typing import Optional
from .common import byteme, get_file_logger, run_stdout_to_err
from .unshare import Unshare, nsenter_as_root, nsenter_as_user
log = get_file_logger(__file__)
MiB = 2 ** 20
# Otherwise, `mkfs.btrfs` fails with:
# ERROR: minimum size for each btrfs device is 114294784
MIN_CREATE_BYTES = 109 * MiB
# The smallest size to which btrfs will GROW a tiny filesystem. For
# lower values, `btrfs resize` prints:
# ERROR: unable to resize '_foo/volume': Invalid argument
# MIN_GROW_BYTES = 175 * MiB
#
# When a filesystem's `min-dev-size` is small, `btrfs resize` below this
# limit will fail to shrink with `Invalid argument`.
MIN_SHRINK_BYTES = 256 * MiB
def _round_to_loop_block_size(num_bytes: int, log_level: int) -> int:
'''
Avoid T24578982: btrfs soft lockup: `losetup --set-capacity /dev/loopN`
wrongly sets block size to 1024 when backing file size is 4096-odd.
Future: maybe we shouldn't hardcode 4096, but instead query:
blockdev --getbsz /dev/loopSOMETHING
'''
block_size = 4096
rounded = num_bytes + (block_size - (num_bytes % block_size)) % block_size
if num_bytes != rounded:
log.log(
log_level,
f'Rounded image size {num_bytes} up to {rounded} to avoid kernel '
'bug.'
)
return rounded
def _create_or_resize_image_file(
path: bytes, at_least_bytes: int, log_level: int=logging.INFO,
):
'''
Be sure to call `btrfs filesystem resize` and `losetup --set-capacity`
in the appropriate order.
'''
rounded_bytes = _round_to_loop_block_size(at_least_bytes, log_level)
run_stdout_to_err([
'truncate', '-s', str(rounded_bytes), path,
], check=True)
def _fix_up_fs_size(size_bytes: int, min_usable_fs_size: int) -> int:
if size_bytes < min_usable_fs_size:
log.warning(
f'btrfs cannot use a size of {size_bytes} < {min_usable_fs_size} '
'bytes, will use the larger size'
)
return min_usable_fs_size
return size_bytes
def _format_image_file(path: bytes, size_bytes: int) -> int:
'Returns the actual filesystem size, which may have been increased.'
size_bytes = _fix_up_fs_size(size_bytes, MIN_CREATE_BYTES)
log.info(f'Formatting btrfs {size_bytes}-byte FS at {path}')
_create_or_resize_image_file(path, size_bytes)
# Note that this can fail with 'cannot check mount status' if the
# host is in a bad state:
# - a file backing a loop device got deleted, or
# - multiple filesystems with the same UUID got mounted as a loop
# device, breaking the metadata for the affected loop device (this
# latter issue is a kernel bug).
# We don't check for this error case since there's nothing we can do to
# remediate it.
run_stdout_to_err(['mkfs.btrfs', path], check=True)
return size_bytes
def _mount_image_file(
unshare: Optional[Unshare], file_path: bytes, mount_path: bytes,
) -> bytes:
log.info(f'Mounting btrfs {file_path} at {mount_path}')
# Explicitly set filesystem type to detect shenanigans.
run_stdout_to_err(nsenter_as_root(
unshare, 'mount', '-t', 'btrfs', '-o', 'loop,discard,nobarrier',
file_path, mount_path,
), check=True)
loop_dev = subprocess.check_output(nsenter_as_user(
unshare, 'findmnt', '--noheadings', '--output', 'SOURCE',
mount_path,
)).rstrip(b'\n')
# This increases the chances that --direct-io=on will succeed, since one
# of the common failure modes is that the loopback's sector size is NOT
# a multiple of the sector size of the underlying device (the devices
# we've seen in production have sector sizes of 512, 1024, or 4096).
if run_stdout_to_err([
'sudo', 'losetup', '--sector-size=4096', loop_dev,
]).returncode != 0:
log.error(
f'Failed to set --sector-size=4096 for {loop_dev}, setting '
'direct IO is more likely to fail.'
)
# This helps perf and avoids doubling our usage of buffer cache.
# Also, when the image is on tmpfs, setting direct IO fails.
if run_stdout_to_err([
'sudo', 'losetup', '--direct-io=on', loop_dev,
]).returncode != 0:
log.error(
f'Could not enable --direct-io for {loop_dev}, expect worse '
'performance.'
)
return loop_dev
def _minimize_image_size(
*, unshare: Optional[Unshare], cur_size: int, image_path: bytes,
mount_path: bytes, loop_dev: bytes,
) -> int:
'Returns the new filesystem size.'
min_size_out = subprocess.check_output(nsenter_as_root(
unshare, 'btrfs', 'inspect-internal', 'min-dev-size', mount_path,
)).split(b' ')
assert min_size_out[1] == b'bytes'
min_size = _fix_up_fs_size(int(min_size_out[0]), MIN_SHRINK_BYTES)
if min_size >= cur_size:
log.info(
f'Nothing to do: the minimum resize limit {min_size} is no less '
f'than the current filesystem size of {cur_size} bytes.'
)
        return cur_size
log.info(f'Shrinking {image_path} to the btrfs minimum, {min_size} bytes')
run_stdout_to_err(nsenter_as_root(
unshare, 'btrfs', 'filesystem', 'resize', str(min_size),
mount_path,
), check=True)
fs_bytes = int(subprocess.check_output(nsenter_as_user(
unshare, 'findmnt', '--bytes', '--noheadings', '--output', 'SIZE',
mount_path,
)))
# Log an error on size rounding since this is not expected to need it.
_create_or_resize_image_file(image_path, fs_bytes, log_level=logging.ERROR)
run_stdout_to_err([
'sudo', 'losetup', '--set-capacity', loop_dev,
], check=True)
return min_size
class LoopbackVolume:
def __init__(
self, unshare: Optional[Unshare], image_path: bytes, size_bytes: int,
):
self._unshare = unshare
self._temp_dir_ctx = tempfile.TemporaryDirectory() # noqa: P201
self._size_bytes = size_bytes
self._image_path = byteme(os.path.abspath(image_path))
self._temp_dir: Optional[bytes] = None
self._mount_dir: Optional[bytes] = None
def __enter__(self) -> 'LoopbackVolume':
self._temp_dir = byteme(
os.path.abspath(self._temp_dir_ctx.__enter__())
)
try:
self._size_bytes = _format_image_file(
self._image_path, self._size_bytes
)
self._mount_dir = os.path.join(self._temp_dir, b'volume')
os.mkdir(self._mount_dir)
self._loop_dev = _mount_image_file(
self._unshare, self._image_path, self._mount_dir,
)
except BaseException:
self.__exit__(*sys.exc_info())
raise
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
'This only suppresses exceptions if TemporaryDirectory.__exit__ does.'
if self._mount_dir:
# If this throws, we won't be able to clean up `_mount_dir`, so
# let the error fly. If the loopback is inside an Unshare
# object, the mount itself will eventually get cleaned up, but
# we don't have ownership to trigger Unshare cleanup, and in any
# case, that kind of clean-up is asynchronous, and would be
# tricky to await properly.
#
# NB: It's possible to use tmpfs and namespaces to guarantee
# cleanup, but it's just an empty directory in `/tmp`, so it's
# really not worth the complexity.
self.unmount_if_mounted()
return self._temp_dir_ctx.__exit__(exc_type, exc_val, exc_tb)
def unmount_if_mounted(self):
if self._mount_dir:
# Nothing might have been mounted, ignore exit code
run_stdout_to_err(
nsenter_as_root(self._unshare, 'umount', self._mount_dir),
)
def dir(self) -> bytes:
return self._mount_dir
def minimize_size(self) -> int:
'Returns the new image size.'
self._size_bytes = _minimize_image_size(
unshare=self._unshare,
cur_size=self._size_bytes,
image_path=self._image_path,
mount_path=self._mount_dir,
loop_dev=self._loop_dev,
)
return self._size_bytes
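# Example usage (illustrative sketch, not part of the original module). The
# mount / losetup calls above go through sudo, so this only works on a host
# where that is permitted; `unshare=None` stays in the host namespaces and
# the image filename is arbitrary.
if __name__ == '__main__':
    with LoopbackVolume(
        unshare=None, image_path=b'example.btrfs', size_bytes=256 * MiB,
    ) as vol:
        print('mounted at', vol.dir())
        print('minimized to', vol.minimize_size(), 'bytes')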
|
py | 1a48619e2c0d507aba7ef76075212c592f7e7499 | import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from models.connections.conv_2d_bn_activ import Conv2DBNActiv
from models.connections.resblock import ResBlock
from chainercv.links import PickableSequentialChain
class SERes2Net(PickableSequentialChain):
_blocks = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3]
}
def __init__(self, n_layer,
n_class=None, scale=4,
pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
blocks = self._blocks[n_layer]
self.mean = mean
if initialW is None:
initialW = initializers.HeNormal(scale=1., fan_option='fan_out')
if 'initialW' not in fc_kwargs:
fc_kwargs['initialW'] = initializers.Normal(scale=0.01)
kwargs = {
'scale': scale, 'initialW': initialW, 'stride_first': True,
'add_seblock': True}
super(SERes2Net, self).__init__()
with self.init_scope():
self.conv1 = Conv2DBNActiv(None, 64, 3, 1, 1, nobias=True,
initialW=initialW)
self.res2 = ResBlock(blocks[0], None, 64, 256, 2, **kwargs)
self.res3 = ResBlock(blocks[1], None, 128, 512, 1, **kwargs)
self.res4 = ResBlock(blocks[2], None, 256, 1024, 2, **kwargs)
self.res5 = ResBlock(blocks[3], None, 512, 2048, 1, **kwargs)
self.pool5 = lambda x: F.average(x, axis=(2, 3))
self.fc6 = L.Linear(None, n_class, **fc_kwargs)
class SERes2Net50(SERes2Net):
def __init__(self, n_class=10, scale=4, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SERes2Net50, self).__init__(
50, n_class, scale, pretrained_model,
mean, initialW, fc_kwargs)
class SERes2Net101(SERes2Net):
def __init__(self, n_class=10, scale=4, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SERes2Net101, self).__init__(
101, n_class, scale, pretrained_model,
mean, initialW, fc_kwargs)
class SERes2Net152(SERes2Net):
def __init__(self, n_class=10, scale=4, pretrained_model=None,
mean=None, initialW=None, fc_kwargs={}):
super(SERes2Net152, self).__init__(
152, n_class, scale, pretrained_model,
mean, initialW, fc_kwargs)
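# Example (illustrative sketch, not part of the original module): build the
# 50-layer variant and run a dummy forward pass. Requires the local
# `models.connections` package and chainercv; the input size and n_class are
# arbitrary choices for illustration.
if __name__ == '__main__':
    model = SERes2Net50(n_class=10, scale=4)
    x = np.zeros((1, 3, 32, 32), dtype=np.float32)
    with chainer.using_config('train', False):
        y = model(x)
    print(y.shape)  # expected: (1, 10)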
|
py | 1a4862fe2df399e15f31f7a1cdbba256e26f20fe | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/14 16:30
Desc: Shenwan (SW) indexes - SW level-1, level-2 and level-3 industry indexes
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
    SW - real-time quotes of the market-representation indexes
    http://www.swsindex.com/idx0120.aspx?columnid=8831
    :return: real-time quotes of the market-representation indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_spot() -> pd.DataFrame:
"""
    SW level-1 industries - real-time quotes
    http://www.swsindex.com/idx0120.aspx?columnid=8832
    :return: real-time quotes of the SW level-1 industry indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
    SW level-2 industries - real-time quotes
    http://www.swsindex.com/idx0120.aspx?columnId=8833
    :return: real-time quotes of the SW level-2 industry indexes
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_cons(symbol: str = "801011") -> pd.DataFrame:
"""
    SW index constituents - both level-1 and level-2 industry indexes can be queried
    http://www.swsindex.com/idx0210.aspx?swindexcode=801010
    :param symbol: index code
    :type symbol: str
    :return: SW index constituent information
:rtype: pandas.DataFrame
"""
url = f"http://www.swsindex.com/downfile.aspx?code={symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 4:
stock_code = cols[0].text
stock_name = cols[1].text
weight = cols[2].text
start_date = cols[3].text
data.append(
{
"stock_code": stock_code,
"stock_name": stock_name,
"start_date": start_date,
"weight": weight,
}
)
temp_df = pd.DataFrame(data)
temp_df["start_date"] = pd.to_datetime(temp_df["start_date"]).dt.date
temp_df["weight"] = pd.to_numeric(temp_df["weight"])
return temp_df
def sw_index_daily(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
) -> pd.DataFrame:
"""
    Daily quotes for SW level-1 and level-2 indexes
    http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
    :param symbol: SW index code
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: daily quotes of the SW index
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel2.aspx"
params = {
"ctable": "swindexhistory",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 10:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
open_ = cols[3].text
high = cols[4].text
low = cols[5].text
close = cols[6].text
vol = cols[7].text
amount = cols[8].text
change_pct = cols[9].text
data.append(
{
"index_code": symbol.replace(",", ""),
"index_name": index_name.replace(",", ""),
"date": date.replace(",", ""),
"open": open_.replace(",", ""),
"high": high.replace(",", ""),
"low": low.replace(",", ""),
"close": close.replace(",", ""),
"vol": vol.replace(",", ""),
"amount": amount.replace(",", ""),
"change_pct": change_pct.replace(",", ""),
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["vol"] = pd.to_numeric(temp_df["vol"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df["change_pct"] = pd.to_numeric(temp_df["change_pct"])
return temp_df
def sw_index_daily_indicator(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
) -> pd.DataFrame:
"""
    Historical quote indicators for SW level-1 and level-2 industries
    http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
    :param symbol: SW index code
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param data_type: choice of {"Day": daily report, "Week": weekly report}
    :type data_type: str
    :return: SW index indicator data at the chosen frequency
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel.aspx"
params = {
"ctable": "V_Report",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}' and type='{data_type}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 14:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
close = cols[3].text
volume = cols[4].text
chg_pct = cols[5].text
turn_rate = cols[6].text
pe = cols[7].text
pb = cols[8].text
v_wap = cols[9].text
turnover_pct = cols[10].text
float_mv = cols[11].text
avg_float_mv = cols[12].text
dividend_yield_ratio = cols[13].text
data.append(
{
"index_code": symbol,
"index_name": index_name,
"date": date,
"close": close,
"volume": volume,
"chg_pct": chg_pct,
"turn_rate": turn_rate,
"pe": pe,
"pb": pb,
"vwap": v_wap,
"float_mv": float_mv,
"avg_float_mv": avg_float_mv,
"dividend_yield_ratio": dividend_yield_ratio,
"turnover_pct": turnover_pct,
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", ""))
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"])
temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"])
temp_df["pe"] = pd.to_numeric(temp_df["pe"])
temp_df["pb"] = pd.to_numeric(temp_df["pb"])
temp_df["vwap"] = pd.to_numeric(temp_df["vwap"])
temp_df["float_mv"] = temp_df["float_mv"].apply(lambda x: x.replace(",", ""))
temp_df["float_mv"] = pd.to_numeric(
temp_df["float_mv"],
)
temp_df["avg_float_mv"] = temp_df["avg_float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["avg_float_mv"] = pd.to_numeric(temp_df["avg_float_mv"])
temp_df["dividend_yield_ratio"] = pd.to_numeric(temp_df["dividend_yield_ratio"])
temp_df["turnover_pct"] = pd.to_numeric(temp_df["turnover_pct"])
return temp_df
def sw_index_third_info() -> pd.DataFrame:
"""
    legulegu - SW level-3 industries - classification overview
    https://legulegu.com/stockdata/sw-industry-overview#level1
    :return: classification overview
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
def sw_index_third_cons(symbol: str = "851921.SI") -> pd.DataFrame:
"""
    legulegu - SW level-3 industries - industry constituents
    https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
    :param symbol: industry code of the level-3 industry
    :type symbol: str
    :return: industry constituents
:rtype: pandas.DataFrame
"""
url = f"https://legulegu.com/stockdata/index-composition?industryCode={symbol}"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"序号",
"股票代码",
"股票简称",
"纳入时间",
"申万1级",
"申万2级",
"申万3级",
"价格",
"市盈率",
"市盈率ttm",
"市净率",
"股息率",
"市值",
]
temp_df["价格"] = pd.to_numeric(temp_df["价格"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["市盈率ttm"] = pd.to_numeric(temp_df["市盈率ttm"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
temp_df["股息率"] = pd.to_numeric(temp_df["股息率"].str.strip("%"), errors="coerce")
temp_df["市值"] = pd.to_numeric(temp_df["市值"], errors="coerce")
return temp_df
if __name__ == "__main__":
sw_index_representation_spot_df = sw_index_representation_spot()
print(sw_index_representation_spot_df)
sw_index_spot_df = sw_index_spot()
print(sw_index_spot_df)
sw_index_second_spot_df = sw_index_second_spot()
print(sw_index_second_spot_df)
sw_index_cons_df = sw_index_cons(symbol="801193")
print(sw_index_cons_df)
sw_index_daily_df = sw_index_daily(
symbol="801733", start_date="20001201", end_date="20211207"
)
print(sw_index_daily_df)
sw_index_daily_indicator_df = sw_index_daily_indicator(
symbol="801003",
start_date="20191101",
end_date="20191207",
data_type="Week",
)
print(sw_index_daily_indicator_df)
sw_index_third_info_df = sw_index_third_info()
print(sw_index_third_info_df)
sw_index_third_cons_df = sw_index_third_cons(symbol="851921.SI")
print(sw_index_third_cons_df)
|
py | 1a48634e93838ce9a0d2fed2105813e7fc5cbbfc | import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
from torchvision import transforms
import torch
# read data
import os
import numpy as np
from PIL import Image
# utils
TYPE = ['VA_Set', 'EXPR_Set', 'AU_Set']
CLASS = [2, 1, 12]
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
READERS = {
'VA_Set': lambda path: np.genfromtxt(path, dtype=np.single, delimiter=',', skip_header=True),
'EXPR_Set': lambda path: np.genfromtxt(path, dtype=np.int_, skip_header=True),
'AU_Set': lambda path: np.genfromtxt(path, dtype=np.single, delimiter=',', skip_header=True)
}
# datasets
class UnifiedDataset(Dataset):
def __init__(self,
idx: list,
image: np.ndarray,
label: dict,
img_size: int,
mode: str):
# get image
self.idx = idx
self.image = image
self.label = label
# preprocess
if mode == 'Train_Set':
self.preprocess = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.Resize(size=img_size),
transforms.ToTensor(),
transforms.Normalize(
mean=MEAN,
std=STD)
])
else:
self.preprocess = transforms.Compose([
transforms.Resize(size=img_size),
transforms.ToTensor(),
transforms.Normalize(
mean=MEAN,
std=STD)
])
def __getitem__(self, i):
image = Image.open(self.image[i])
image = self.preprocess(image)
label = [self.label['VA_Set'][i],
[self.label['EXPR_Set'][i]],
self.label['AU_Set'][i]]
label = np.concatenate(label)
return image, torch.FloatTensor(label)
def __len__(self):
return len(self.idx)
class UnifiedDataModule(pl.LightningDataModule):
def __init__(self, params: dict):
super().__init__()
self.batch_size = params.get('batch_size', 32)
self.img_size = params.get('img_size', 224)
self.num_workers = params.get('num_workers', 4)
self.dataset_dir = params.get('dataset_dir', '../dataset/Aff-Wild/')
with open(os.path.join(self.dataset_dir, 'file.txt')) as f:
self.image = list(map(lambda x: os.path.join(self.dataset_dir, 'cropped_aligned', x.strip()),
f.readlines()))
self.image = np.array(self.image)
self.label = {}
for label_type in TYPE:
self.label[label_type] = READERS[label_type](os.path.join(self.dataset_dir, label_type + '.txt'))
self.index = np.arange(0, len(self.image))
self.train_idx, self.val_idx = train_test_split(self.index, train_size=0.95, random_state=1234)
def setup(self, stage: str = None) -> None:
if stage == 'fit':
self.train_dataset = UnifiedDataset(
self.train_idx,
self.image,
self.label,
self.img_size,
'Train_Set')
self.val_dataset = UnifiedDataset(
self.val_idx,
self.image,
self.label,
self.img_size,
'Validation_Set')
elif stage == 'validate':
self.val_dataset = UnifiedDataset(
self.val_idx,
self.image,
self.label,
self.img_size,
'Validation_Set')
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers)
if __name__ == '__main__':
os.chdir('..')
dm = UnifiedDataModule({'dataset_dir':'../dataset/Aff-Wild/'})
dm.setup('fit')
dataloader = dm.train_dataloader()
print(len(dataloader.dataset))
img, label = next(iter(dataloader))
print(img.shape, label.shape) |
py | 1a48639f1e27199deb7de4cbe696d8a37d82329a | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
if (sys.version_info > (3,)):
import http.client
from http.client import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
else:
import httplib
from httplib import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
from flask import request, session, make_response
from flask_restful import Resource
from cairis.daemon.CairisHTTPError import ARMHTTPError
from cairis.data.PersonaDAO import PersonaDAO
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.MessageDefinitions import PersonaMessage, PersonaEnvironmentPropertiesMessage, ValueTypeMessage
from cairis.tools.ModelDefinitions import PersonaModel, PersonaEnvironmentPropertiesModel, ValueTypeModel
from cairis.tools.SessionValidator import get_session_id, get_model_generator
__author__ = 'Shamal Faily'
class PersonasAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
constraint_id = request.args.get('constraint_id', -1)
dao = PersonaDAO(session_id)
personas = dao.get_personas(constraint_id=constraint_id)
dao.close()
resp = make_response(json_serialize(personas, session_id=session_id), OK)
    resp.headers['Content-type'] = 'application/json'
return resp
def post(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
new_persona = dao.from_json(request)
persona_id = dao.add_persona(new_persona)
dao.close()
resp_dict = {'message': 'Persona successfully added', 'persona_id': persona_id}
resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-type'] = 'application/json'
return resp
class PersonaByNameAPI(Resource):
def get(self, name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona = dao.get_persona_by_name(name=name)
dao.close()
resp = make_response(json_serialize(persona, session_id=session_id), OK)
resp.headers['Content-type'] = 'application/json'
return resp
def put(self, name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
req = dao.from_json(request)
dao.update_persona(req, name=name)
dao.close()
resp_dict = {'message': 'Persona successfully updated'}
resp = make_response(json_serialize(resp_dict), OK)
resp.headers['Content-type'] = 'application/json'
return resp
def delete(self, name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
dao.delete_persona(name=name)
dao.close()
resp_dict = {'message': 'Persona successfully deleted'}
resp = make_response(json_serialize(resp_dict), OK)
resp.headers['Content-type'] = 'application/json'
return resp
class PersonaModelByNameAPI(Resource):
def get(self, persona, variable, characteristic):
session_id = get_session_id(session, request)
model_generator = get_model_generator()
dao = PersonaDAO(session_id)
if variable == 'All': variable = ''
if characteristic == 'All': characteristic = ''
dot_code = dao.get_persona_model(persona,variable,characteristic)
dao.close()
resp = make_response(model_generator.generate(dot_code, model_type='persona', renderer='dot'), OK)
accept_header = request.headers.get('Accept', 'image/svg+xml')
if accept_header.find('text/plain') > -1:
resp.headers['Content-type'] = 'text/plain'
else:
resp.headers['Content-type'] = 'image/svg+xml'
return resp
class PersonaCharacteristicsByNameAPI(Resource):
def get(self, persona, variable, characteristic):
session_id = get_session_id(session, request)
model_generator = get_model_generator()
dao = PersonaDAO(session_id)
if variable == 'All': variable = ''
if characteristic == 'All': characteristic = ''
char_names = dao.get_persona_characteristics(persona,variable,characteristic)
dao.close()
resp = make_response(json_serialize(char_names, session_id=session_id), OK)
resp.headers['Content-type'] = 'application/json'
return resp
class PersonaNamesAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona_names = dao.get_persona_names()
dao.close()
resp = make_response(json_serialize(persona_names, session_id=session_id), OK)
resp.headers['Content-type'] = 'application/json'
return resp
class PersonaTypesAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
pTypes = dao.get_persona_types()
dao.close()
resp = make_response(json_serialize(pTypes, session_id=session_id), OK)
    resp.headers['Content-type'] = 'application/json'
return resp
class PersonaEnvironmentPropertiesAPI(Resource):
def get(self, persona_name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona_props = dao.get_persona_props(name=persona_name)
dao.close()
    resp = make_response(json_serialize(persona_props, session_id=session_id))
    resp.headers['Content-type'] = 'application/json'
return resp
def put(self, persona_name):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
persona_prop = dao.from_json(request, to_props=True)
dao.update_persona_properties(persona_prop, name=persona_name)
dao.close()
resp_dict = {'message': 'The persona properties were successfully updated.'}
resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-type'] = 'application/json'
return resp
class PersonasSummaryAPI(Resource):
def get(self):
session_id = get_session_id(session, request)
dao = PersonaDAO(session_id)
objts = dao.get_personas_summary()
dao.close()
resp = make_response(json_serialize(objts, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
|
py | 1a48640a696e1d9f17acfbc823a82758ab8d3924 | import functools, operator
def read_map(fh):
hmap = {}
for y, line in enumerate(fh):
for x, h in enumerate(line.strip()):
hmap[x,y] = int(h)
return hmap
def neighbours(k):
return [(k[0],k[1]+1), (k[0],k[1]-1), (k[0]+1,k[1]), (k[0]-1,k[1])]
def low_point(hmap, k):
return functools.reduce(operator.and_, (hmap[k] < hmap.get(n, 10) for n in neighbours(k)))
def risk_map(hmap):
return sum(1 + hmap[k] for k in hmap if low_point(hmap, k))
def basin_size(hmap, k, seen=None):
    # avoid the shared mutable default argument: start each basin with a fresh visited set
    if seen is None:
        seen = set()
    if k in seen or k not in hmap or hmap[k] == 9:
        return 0
    else:
        seen.add(k)
        return 1 + sum(basin_size(hmap, n, seen) for n in neighbours(k))
def basin_sizes(hmap):
sizes = sorted(basin_size(hmap, k) for k in hmap if low_point(hmap, k))
return sizes[-1] * sizes[-2] * sizes[-3]
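# Illustrative self-check added for this corpus (not part of the original script).
# read_map() only needs an iterable of lines, so the worked example grid from the
# Advent of Code 2021 day 9 statement can be fed in directly; the expected answers
# (15 and 1134) are the ones quoted in that example and are assumed here only for
# demonstration.
def _self_check():
    example = ["2199943210",
               "3987894921",
               "9856789892",
               "8767896789",
               "9899965678"]
    demo = read_map(example)
    print("example part 1: %d (expected 15)" % risk_map(demo))
    print("example part 2: %d (expected 1134)" % basin_sizes(demo))
_self_check()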
with open("day09.txt", "r") as fh:
hmap = read_map(fh)
print("2021 day 09 part 1: %d" % risk_map(hmap))
print("2021 day 09 part 2: %d" % basin_sizes(hmap))
|
py | 1a48657bec6765e6ae688d3db94e81df787ebafc | import os
import sys
import shutil
import jsbeautifier
from utils.content import getContent
from utils.formatter import Formatter
from utils.merge import Merge
from utils.classify import Classifier
class Process:
def main(self, file, argvs):
path = sys.path[0]
print("****************************")
print(file)
res = jsbeautifier.beautify_file(file)
preFile = "preformat_" + file
op = open(preFile, "w+")
op.write(res)
op.close()
oFileContent = getContent(preFile)
formatFile = Formatter()
formatFile.formatter(file, oFileContent)
fFile = "formatted_" + file
fFileContent = getContent(fFile)
isAbnormal = False
isHighRisk = False
mergeFile = Merge()
isAbnormal, isHighRisk = mergeFile.mergeReduce(file, fFileContent, argvs)
print(isAbnormal, isHighRisk)
srcProcessedPath = path + "/" + file
if not isAbnormal and not isHighRisk:
#classify processible contract
classify = Classifier()
mFile = "merged_" + file
mFileContent = getContent(mFile)
isProcessible = classify.classifier(mFileContent)
print(isProcessible)
srcProcessiblePath = path + "/" + mFile
if isProcessible:
dstProcessiblePath = path + "/Processible/" + mFile
shutil.copy(srcProcessiblePath, dstProcessiblePath)
print(mFile, " is processible and has been put in the Processible directory.")
os.remove(srcProcessiblePath)
else:
os.remove(srcProcessiblePath)
desProcessedPath = path + "/ProcessedContracts/" + file
noteStr = "ProcessedContracts"
elif not isAbnormal and isHighRisk:
desProcessedPath = path + "/varRepeatContracts/" + file
noteStr = "varRepeatContracts"
elif isAbnormal and not isHighRisk:
desProcessedPath = path + "/abnormalContracts/" + file
noteStr = "abnormalContracts"
shutil.copy(srcProcessedPath, desProcessedPath)
print(file, " has been moved to the " + noteStr +" directory.")
#remove formatted contract
formattedFile = path + "/" + fFile
os.remove(formattedFile)
os.remove(preFile)
os.remove(srcProcessedPath)
if __name__ == "__main__":
filename = sys.argv[1]
argvs = ''
if len(sys.argv) > 2:
argvs = sys.argv[2]
    Process().main(filename, argvs)
|
py | 1a4867583420b28776fcbe8333e8c5f974ea1694 | import os
import shutil
import yaml
from six import iteritems
from ..base import PackageJson, BasePackageManager, PackageManagerError
from .lockfile import PnpmLockfile
from .workspace import PnpmWorkspace
from .utils import build_pj_path, build_lockfile_path, build_ws_config_path, build_nm_bundle_path
class PnpmPackageManager(BasePackageManager):
_STORE_NM_PATH = os.path.join(".pnpm", "store")
_VSTORE_NM_PATH = os.path.join(".pnpm", "virtual-store")
_STORE_VER = "v3"
def install(self):
"""
Creates node_modules directory according to the lockfile.
"""
self._prepare_workspace()
self._exec_command([
"install",
"--offline",
"--frozen-lockfile",
"--store-dir", self._nm_path(self._STORE_NM_PATH),
"--virtual-store-dir", self._nm_path(self._VSTORE_NM_PATH),
"--no-verify-store-integrity",
"--package-import-method", "hardlink",
"--ignore-pnpmfile",
"--ignore-scripts",
"--strict-peer-dependencies",
])
self._fix_stores_in_modules_yaml()
def get_peer_paths_from_package_json(self):
"""
Returns paths of direct workspace dependencies (source root related).
:rtype: list of str
"""
pj = PackageJson.load(build_pj_path(self.sources_path))
return map(lambda x: os.path.normpath(os.path.join(self.module_path, x[1])), pj.get_workspace_dep_paths())
def calc_node_modules_inouts(self):
"""
Returns input and output paths for command that creates `node_modules` bundle.
:return: Pair of input and output paths with correct roots ($S or $B).
:rtype: (list of str, list of str)
"""
# Inputs: source package.json and lockfile, built package.jsons, lockfiles and workspace configs of deps, tarballs.
ins = []
# Source lockfiles are used only to get tarballs info.
src_lf_paths = [build_lockfile_path(self.sources_path)]
pj = PackageJson.load(build_pj_path(self.sources_path))
for [dep_src_path, (dep_pj, depth)] in iteritems(pj.get_workspace_map()):
if dep_src_path == self.sources_path:
continue
dep_mod_path = dep_src_path[len(self.sources_root) + 1:]
# pnpm requires all package.jsons.
ins.append(build_pj_path(dep_mod_path))
dep_lf_src_path = build_lockfile_path(dep_src_path)
if not os.path.isfile(dep_lf_src_path):
continue
src_lf_paths.append(dep_lf_src_path)
# Merged workspace configs and lockfiles of direct deps.
if depth == 1:
ins.append(build_ws_config_path(dep_mod_path))
ins.append(build_lockfile_path(dep_mod_path))
for pkg in self.extract_packages_meta_from_lockfiles(src_lf_paths):
ins.append(self._contrib_tarball_path(pkg))
s_root = lambda x: os.path.join("$S", x)
b_root = lambda x: os.path.join("$B", x)
        ins = [b_root(x) for x in ins] + [
s_root(build_pj_path(self.module_path)),
s_root(build_lockfile_path(self.module_path)),
]
# Outputs: patched lockfile, generated workspace config, created node_modules bundle.
outs = [b_root(f(self.module_path)) for f in (build_lockfile_path, build_ws_config_path, build_nm_bundle_path)]
return (ins, outs)
def extract_packages_meta_from_lockfiles(self, lf_paths):
"""
:type lf_paths: iterable of BaseLockfile
:rtype: iterable of LockfilePackageMeta
"""
tarballs = set()
for lf_path in lf_paths:
try:
for pkg in PnpmLockfile.load(lf_path).get_packages_meta():
if pkg.tarball_path not in tarballs:
tarballs.add(pkg.tarball_path)
yield pkg
except Exception as e:
raise PackageManagerError("Unable to process lockfile {}: {}".format(lf_path, e))
def _prepare_workspace(self):
pj = self._build_package_json()
ws = PnpmWorkspace(build_ws_config_path(self.build_path))
ws.set_from_package_json(pj)
dep_paths = ws.get_paths()
self._build_merged_workspace_config(ws, dep_paths)
self._build_merged_lockfile(dep_paths)
def _build_package_json(self):
"""
:rtype: PackageJson
"""
in_pj_path = build_pj_path(self.sources_path)
out_pj_path = build_pj_path(self.build_path)
shutil.copyfile(in_pj_path, out_pj_path)
return PackageJson.load(out_pj_path)
def _build_merged_lockfile(self, dep_paths):
"""
:type dep_paths: list of str
:rtype: PnpmLockfile
"""
in_lf_path = build_lockfile_path(self.sources_path)
out_lf_path = build_lockfile_path(self.build_path)
lf = PnpmLockfile.load(in_lf_path)
# Change to the output path for correct path calcs on merging.
lf.path = out_lf_path
for dep_path in dep_paths:
            if dep_path == self.build_path:
continue
lf_path = build_lockfile_path(dep_path)
if os.path.isfile(lf_path):
lf.merge(PnpmLockfile.load(lf_path))
lf.update_tarball_resolutions(lambda p: self._contrib_tarball_url(p))
lf.write()
def _build_merged_workspace_config(self, ws, dep_paths):
"""
:type ws: PnpmWorkspaceConfig
:type dep_paths: list of str
"""
for dep_path in dep_paths:
            if dep_path == self.build_path:
continue
ws_config_path = build_ws_config_path(dep_path)
if os.path.isfile(ws_config_path):
ws.merge(PnpmWorkspace.load(ws_config_path))
ws.write()
def _fix_stores_in_modules_yaml(self):
"""
Ensures that store paths are the same as would be after installing deps in the source dir.
This is required to reuse `node_modules` after build.
"""
with open(self._nm_path(".modules.yaml"), "r+") as f:
data = yaml.load(f, Loader=yaml.CSafeLoader)
# NOTE: pnpm requires absolute store path here.
data["storeDir"] = os.path.join(self.sources_path, "node_modules", self._STORE_NM_PATH, self._STORE_VER)
data["virtualStoreDir"] = self._VSTORE_NM_PATH
f.seek(0)
yaml.dump(data, f, Dumper=yaml.CSafeDumper)
f.truncate()
def _get_default_options(self):
return super(PnpmPackageManager, self)._get_default_options() + [
"--stream",
"--reporter", "append-only",
"--no-color",
]
def _get_debug_log_path(self):
return self._nm_path(".pnpm-debug.log")
|
py | 1a486799fc0d0584650e776bc5e6992f2d64905b | import unittest
from models import articles
Articles = articles.Articles
class ArticlesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Articles class
'''
def setUp(self):
'''
Set up method that will run before every test
'''
self.new_articles = Articles('id','author','description','https://www.youtube.com/watch?v=RN75zSpYp7M',"https://i.kinja-img.com/gawker-media/image/upload/s--yDtXY-I4--/c_fill,fl_progressive,g_center,h_900,q_80,w_1600/pj5jc9ntilzdb4dfnivl.png",'kenya','content')
def test_instance(self):
self.assertTrue(isinstance(self.new_articles,Articles))
if __name__ == '__main__':
unittest.main() |
py | 1a4867fcd2f0df5be9687bc2d3920e9658b396ed | # -*- coding: utf-8 -*-
from seleniumbase import BaseCase
class ChinesePdfTests(BaseCase):
def test_chinese_pdf(self):
pdf = (
"https://github.com/seleniumbase/SeleniumBase/"
"files/3895614/unittest.pdf"
)
# Get and print PDF text
pdf_text = self.get_pdf_text(pdf, page=2)
self._print("\n" + pdf_text)
# Assert PDF contains the expected text on Page 2
self.assert_pdf_text(pdf, "个测试类", page=2)
# Assert PDF contains the expected text on any of the pages
self.assert_pdf_text(pdf, "运行单元测试")
self.assert_pdf_text(pdf, "等待测试结束后显示所有结果")
self.assert_pdf_text(pdf, "测试的执行跟方法的顺序没有关系")
|
py | 1a4868c0544744b2651ef336da522860baf4b68a | import unittest
from yoti_python_sdk.doc_scan.session.retrieve.frame_response import FrameResponse
from yoti_python_sdk.doc_scan.session.retrieve.media_response import MediaResponse
class FrameResponseTest(unittest.TestCase):
def test_should_parse_correctly(self):
data = {"media": {}}
result = FrameResponse(data)
assert isinstance(result.media, MediaResponse)
def test_should_parse_when_none(self):
result = FrameResponse(None)
assert isinstance(result, FrameResponse)
assert result.media is None
if __name__ == "__main__":
unittest.main()
|
py | 1a4868c311462caaa30cc9dd5bfbcfc494658bef | from datetime import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from tagging.fields import TagField
import object_feeds
class Paper(models.Model):
""" A formal write-up of results. """
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey("content_type", "object_id")
title = models.CharField(_("title"), max_length=255, unique=True)
slug = models.SlugField()
creator = models.ForeignKey(User, verbose_name=_("creator"), related_name="%(class)s_created")
created = models.DateTimeField(_("created"), default=datetime.now)
last_editor = models.ForeignKey(User, verbose_name=_("last_editor"), related_name="%(class)s_edited")
last_edited = models.DateTimeField(default=datetime.now)
tags = TagField()
contributor_users = models.ManyToManyField(User,
through = "PaperContributor",
verbose_name = _("contributor")
)
### denormalization
# votes
yeas = models.PositiveIntegerField(default=0, editable=False)
nays = models.PositiveIntegerField(default=0, editable=False)
votes = models.PositiveIntegerField(default=0, editable=False)
# contributors
contributors_count = models.PositiveIntegerField(default=0, editable=False)
# comments
comments_count = models.PositiveIntegerField(default=0, editable=False)
# followers
followers_count = models.PositiveIntegerField(default=0, editable=False)
class Meta:
app_label = "papers"
verbose_name = _("Paper")
verbose_name_plural = _("Papers")
ordering = ['slug']
get_latest_by = 'last_edited'
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse("paper_detail", kwargs={"slug": self.slug})
def user_is_contributor(self, user):
return self.contributors.filter(user=user).exists()
@property
def current(self):
return self.revisions.latest()
    def revision(self, rev_number):
        # not a property: looking up a specific revision requires the revision number
        return self.revisions.get(revision=rev_number)
object_feeds.register(Paper)
class PaperRevision(models.Model):
""" A change in Paper. """
paper = models.ForeignKey(Paper, verbose_name=_(u'Paper'), related_name="revisions")
editor = models.ForeignKey(User, verbose_name=_(u'Editor'), null=True)
revision = models.IntegerField(_(u"Revision Number"))
comment = models.CharField(_(u"Editor comment"), max_length=255, blank=True)
content = models.TextField(_(u"Content"))
created = models.DateTimeField(_(u"Modified at"), default=datetime.now)
yeas = models.PositiveIntegerField(default=0, editable=False)
nays = models.PositiveIntegerField(default=0, editable=False)
votes = models.PositiveIntegerField(default=0, editable=False)
class Meta:
verbose_name = _(u'Paper revision')
verbose_name_plural = _(u'Paper revisions')
get_latest_by = 'created'
ordering = ['-revision']
def __unicode__(self):
return ugettext('Revision %(created)s for %(page_title)s') % {
'created': self.created.strftime('%Y%m%d-%H%M'),
'page_title': self.paper.title,
}
def get_absolute_url(self):
return reverse("paper_revision", kwargs={"paper_id": self.paper.id, "revision_number": self.revision})
class PaperContributor(models.Model):
paper = models.ForeignKey(Paper, related_name = "contributors", verbose_name = _("paper"))
user = models.ForeignKey(User, related_name = "papers", verbose_name = _("user"))
contributions = models.PositiveIntegerField(_("contributions"), default=1)
away = models.BooleanField(_("away"), default=False)
away_message = models.CharField(_("away_message"), max_length=500)
away_since = models.DateTimeField(_("away since"), default=datetime.now)
class Meta:
unique_together = [("user", "paper")]
from django.db.models.signals import pre_save, post_save
def paper_feed_title_update(sender, instance, created, **kwargs):
instance.feed.title = instance.title
instance.feed.save()
post_save.connect(paper_feed_title_update, sender=Paper)
|
py | 1a4869c77243b6d70bee5265eaddcff888194e6d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-20 17:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0002_constraints'),
]
operations = [
migrations.AlterModelOptions(
name='anniversary',
options={'get_latest_by': 'updated', 'ordering': ('order', 'name'), 'verbose_name': 'Gedenktag', 'verbose_name_plural': 'Gedenktage'},
),
migrations.AlterModelOptions(
name='approximate',
options={'get_latest_by': 'updated', 'ordering': ('start_time',), 'verbose_name': 'Ungefährer Zeitpunkt', 'verbose_name_plural': 'Ungefähre Zeitpunkte'},
),
migrations.AlterModelOptions(
name='calendar',
options={'get_latest_by': 'updated', 'ordering': ('season__name',), 'verbose_name': 'Kalender', 'verbose_name_plural': 'Kalender'},
),
migrations.AlterModelOptions(
name='category',
options={'get_latest_by': 'updated', 'ordering': ('order', 'code', 'name'), 'verbose_name': 'Kategorie', 'verbose_name_plural': 'Kategorien'},
),
migrations.AlterModelOptions(
name='collective',
options={'get_latest_by': 'updated', 'ordering': ('order', 'name'), 'verbose_name': 'Gruppe', 'verbose_name_plural': 'Gruppen'},
),
migrations.AlterModelOptions(
name='equipment',
options={'get_latest_by': 'updated', 'ordering': ('code',), 'verbose_name': 'Ausrüstung', 'verbose_name_plural': 'Ausrüstungen'},
),
migrations.AlterModelOptions(
name='event',
options={'get_latest_by': 'updated', 'ordering': ('start_date',), 'verbose_name': 'Veranstaltungstermin', 'verbose_name_plural': 'Veranstaltungstermine'},
),
migrations.AlterModelOptions(
name='fitness',
options={'get_latest_by': 'updated', 'ordering': ('order', 'code'), 'verbose_name': 'Konditionelle Anforderung', 'verbose_name_plural': 'Konditionelle Anforderungen'},
),
migrations.AlterModelOptions(
name='fitnessdescription',
options={'get_latest_by': 'updated', 'ordering': ('fitness__code', 'category__order'), 'verbose_name': 'Beschreibung der Konditionelle Anforderung', 'verbose_name_plural': 'Beschreibungen der Konditionelle Anforderungen'},
),
migrations.AlterModelOptions(
name='guide',
options={'get_latest_by': 'updated', 'ordering': ('user__last_name', 'user__first_name'), 'verbose_name': 'Touren/Kurs/Gruppenleiter', 'verbose_name_plural': 'Touren/Kurs/Gruppenleiter'},
),
migrations.AlterModelOptions(
name='instruction',
options={'get_latest_by': 'updated', 'ordering': ('instruction__start_date', 'topic__order'), 'verbose_name': 'Kurs', 'verbose_name_plural': 'Kurse'},
),
migrations.AlterModelOptions(
name='part',
options={'get_latest_by': 'updated', 'ordering': ('order', 'name'), 'verbose_name': 'Abschnitt', 'verbose_name_plural': 'Abschnitte'},
),
migrations.AlterModelOptions(
name='profile',
options={'get_latest_by': 'updated', 'ordering': ('user__last_name', 'user__first_name'), 'verbose_name': 'Steckbrief', 'verbose_name_plural': 'Steckbriefe'},
),
migrations.AlterModelOptions(
name='retraining',
options={'get_latest_by': 'updated', 'ordering': ['year', 'order'], 'verbose_name': 'Fortbildung', 'verbose_name_plural': 'Fortbildungen'},
),
migrations.AlterModelOptions(
name='season',
options={'get_latest_by': 'updated', 'ordering': ('name',), 'verbose_name': 'Saison', 'verbose_name_plural': 'Saisonen'},
),
migrations.AlterModelOptions(
name='section',
options={'get_latest_by': 'updated', 'ordering': ('order', 'name'), 'verbose_name': 'Unterabschnitt', 'verbose_name_plural': 'Unterabschnitte'},
),
migrations.AlterModelOptions(
name='session',
options={'get_latest_by': 'updated', 'ordering': ('collective__season__name', 'collective__name', 'session__start_date'), 'verbose_name': 'Gruppentermin', 'verbose_name_plural': 'Gruppentermine'},
),
migrations.AlterModelOptions(
name='skill',
options={'get_latest_by': 'updated', 'ordering': ('order', 'code'), 'verbose_name': 'Technische Anforderung', 'verbose_name_plural': 'Technische Anforderungen'},
),
migrations.AlterModelOptions(
name='skilldescription',
options={'get_latest_by': 'updated', 'ordering': ('skill__code', 'category__order'), 'verbose_name': 'Beschreibung der technischen Anforderung', 'verbose_name_plural': 'Beschreibung der technischen Anforderungen'},
),
migrations.AlterModelOptions(
name='state',
options={'get_latest_by': 'updated', 'ordering': ('order', 'name'), 'verbose_name': 'Bearbeitungsstand', 'verbose_name_plural': 'Bearbeitungsstände'},
),
migrations.AlterModelOptions(
name='talk',
options={'get_latest_by': 'updated', 'ordering': ('talk__start_date',), 'verbose_name': 'Vortrag', 'verbose_name_plural': 'Vortäge'},
),
migrations.AlterModelOptions(
name='topic',
options={'get_latest_by': 'updated', 'ordering': ('season__name', 'order', 'name'), 'verbose_name': 'Kursinhalt', 'verbose_name_plural': 'Kursinhalte'},
),
migrations.AlterModelOptions(
name='tour',
options={'get_latest_by': 'updated', 'ordering': ('tour__start_date',), 'verbose_name': 'Gemeinschaftstour', 'verbose_name_plural': 'Gemeinschaftstouren'},
),
migrations.AlterModelOptions(
name='vacation',
options={'get_latest_by': 'updated', 'ordering': ('start_date', 'name'), 'verbose_name': 'Ferien', 'verbose_name_plural': 'Ferien'},
),
migrations.AddField(
model_name='category',
name='deadline',
field=models.BooleanField(db_index=True, default=False, help_text='Kategorie für den Anmeldeschluss', verbose_name='Anmeldeschluss'),
),
migrations.AddField(
model_name='category',
name='preliminary',
field=models.BooleanField(db_index=True, default=False, help_text='Kategorie für die Vorbesprechung', verbose_name='Vorbesprechung'),
),
]
|
py | 1a486aeb97cee7fc392adb804ad59decb5488c31 | from synapseclient.activity import Activity
# SYNPY-744
def test_private_getStringList():
act = Activity()
url_string = \
'https://github.com/Sage-Bionetworks/ampAdScripts/blob/master/Broad-Rush/migrateROSMAPGenotypesFeb2015.R'
act.used([{'wasExecuted': True,
'concreteType': 'org.sagebionetworks.repo.model.provenance.UsedURL',
'url': url_string}
])
assert [url_string] == act._getStringList()
|
py | 1a486b5cccd224f7f3bd57855fa4aa2249c3c753 | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# darkmoss scheme by Gabriel Avanzi (https://github.com/avanzzzi)
base00 = "#171e1f"
base01 = "#252c2d"
base02 = "#373c3d"
base03 = "#555e5f"
base04 = "#818f80"
base05 = "#c7c7a5"
base06 = "#e3e3c8"
base07 = "#e1eaef"
base08 = "#ff4658"
base09 = "#e6db74"
base0A = "#fdb11f"
base0B = "#499180"
base0C = "#66d9ef"
base0D = "#498091"
base0E = "#9bc0c8"
base0F = "#d27b53"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base01
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base05
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base02
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base02
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base02
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base0B
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base02
#Foreground color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base05
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base02
# Foreground color for the selected item in filename prompts.
c.colors.prompts.selected.fg = base05
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base01
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base01
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
|
py | 1a486d30a53a9dbc45372244fccbac57c1a9f10c | from datetime import datetime, date
from six import iteritems, PY2, PY3, u
import json
import pytz
from enum import Enum
if PY3:
from datetime import timezone
# compat
from six.moves import map
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime) or isinstance(obj, date) else None
class OutputModes(Enum):
"""List of valid settings for the output_mode parameter of the OpenTok.start_archive()
method."""
composed = u('composed')
"""All streams in the archive are recorded to a single (composed) file."""
individual = u('individual')
"""Each stream in the archive is recorded to an individual file."""
class Archive(object):
"""Represents an archive of an OpenTok session.
:ivar created_at:
The time at which the archive was created, in milliseconds since the UNIX epoch.
:ivar duration:
The duration of the archive, in milliseconds.
:ivar has_audio:
Boolean value set to true when the archive contains an audio track,
and set to false otherwise.
:ivar has_video:
Boolean value set to true when the archive contains a video track,
and set to false otherwise.
:ivar id:
The archive ID.
:ivar name:
The name of the archive. If no name was provided when the archive was created, this is set
to null.
:ivar output_mode:
Whether all streams in the archive are recorded to a single file
(OutputModes.composed) or to individual files (OutputModes.individual).
:ivar partnerId:
The API key associated with the archive.
:ivar reason:
For archives with the status "stopped", this can be set to "90 mins exceeded", "failure",
"session ended", or "user initiated". For archives with the status "failed", this can be set
to "system failure".
:ivar sessionId:
The session ID of the OpenTok session associated with this archive.
:ivar size:
The size of the MP4 file. For archives that have not been generated, this value is set to 0.
:ivar status:
The status of the archive, which can be one of the following:
* "available" -- The archive is available for download from the OpenTok cloud.
* "expired" -- The archive is no longer available for download from the OpenTok cloud.
* "failed" -- The archive recording failed.
* "paused" -- The archive is in progress and no clients are publishing streams to the
session. When an archive is in progress and any client publishes a stream, the status is
"started". When an archive is paused, nothing is recorded. When a client starts publishing
a stream, the recording starts (or resumes). If all clients disconnect from a session that
is being archived, the status changes to "paused", and after 60 seconds the archive
recording stops (and the status changes to "stopped").
* "started" -- The archive started and is in the process of being recorded.
* "stopped" -- The archive stopped recording.
* "uploaded" -- The archive is available for download from the the upload target
Amazon S3 bucket or Windows Azure container that you set at the
`OpenTok dashboard <https://dashboard.tokbox.com>`_.
:ivar url:
The download URL of the available MP4 file. This is only set for an archive with the status set to
"available"; for other archives, (including archives with the status "uploaded") this property is
set to null. The download URL is obfuscated, and the file is only available from the URL for
10 minutes. To generate a new URL, call the Archive.listArchives() or OpenTok.getArchive() method.
"""
def __init__(self, sdk, values):
self.sdk = sdk
self.id = values.get('id')
self.name = values.get('name')
self.status = values.get('status')
self.session_id = values.get('sessionId')
self.partner_id = values.get('partnerId')
if PY2:
self.created_at = datetime.fromtimestamp(values.get('createdAt') / 1000, pytz.UTC)
if PY3:
self.created_at = datetime.fromtimestamp(values.get('createdAt') // 1000, timezone.utc)
self.size = values.get('size')
self.duration = values.get('duration')
self.has_audio = values.get('hasAudio')
self.has_video = values.get('hasVideo')
self.output_mode = OutputModes[values.get('outputMode', 'composed')]
self.url = values.get('url')
def stop(self):
"""
Stops an OpenTok archive that is being recorded.
Archives automatically stop recording after 90 minutes or when all clients have disconnected
from the session being archived.
"""
temp_archive = self.sdk.stop_archive(self.id)
for k,v in iteritems(temp_archive.attrs()):
setattr(self, k, v)
def delete(self):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
"""
self.sdk.delete_archive(self.id)
# TODO: invalidate this object
def attrs(self):
"""
Returns a dictionary of the archive's attributes.
"""
        return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk")
def json(self):
"""
Returns a JSON representation of the archive.
"""
return json.dumps(self.attrs(), default=dthandler, indent=4)
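# Illustrative usage sketch added for this corpus (not part of the OpenTok SDK
# source). Archive objects are normally returned by the SDK client (for example
# by its archive start/fetch calls); any object exposing stop_archive() and
# delete_archive() is enough to exercise the wrapper methods defined above.
def _archive_usage_sketch():
    class _StubSDK(object):
        def stop_archive(self, archive_id):
            return Archive(self, {'id': archive_id, 'status': 'stopped',
                                  'createdAt': 0, 'outputMode': 'composed'})
        def delete_archive(self, archive_id):
            pass
    archive = Archive(_StubSDK(), {'id': 'demo-archive', 'status': 'started',
                                   'createdAt': 0, 'outputMode': 'composed'})
    archive.stop()          # delegates to sdk.stop_archive and copies the returned fields
    print(archive.json())   # JSON view of the archive's attributes
    archive.delete()        # delegates to sdk.delete_archive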
class ArchiveList(object):
def __init__(self, sdk, values):
self.count = values.get('count')
self.items = list(map(lambda x: Archive(sdk, x), values.get('items', [])))
def __iter__(self):
for x in self.items:
yield x
def attrs(self):
return {
'count': self.count,
            'items': [archive.attrs() for archive in self.items]
}
def json(self):
return json.dumps(self.attrs(), default=dthandler, indent=4)
def __getitem__(self, key):
        return self.items[key]
def __setitem__(self, key, item):
raise ArchiveError(u('Cannot set item {0} for key {1} in Archive object').format(item, key))
def __len__(self):
return len(self.items)
|
py | 1a486db4de22c63c27e928d0755d49a5c4c99b6f | import hashlib
import os
def upload_path(instance, filename, **kwargs):
hasher = hashlib.md5()
for chunk in instance.image.chunks():
hasher.update(chunk)
hash = hasher.hexdigest()
base, ext = os.path.splitext(filename)
return '%(first)s/%(second)s/%(hash)s/%(base)s%(ext)s' % {
'first': hash[0],
'second': hash[1],
'hash': hash,
'base': base,
'ext': ext,
}
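# Illustrative sketch added for this corpus (not part of the original module):
# upload_path() only needs an object whose ``image.chunks()`` yields bytes, so a
# minimal stand-in is enough to show the layout it produces,
# '<hash[0]>/<hash[1]>/<md5 hexdigest>/<original filename>'.
if __name__ == '__main__':
    class _FakeImage(object):
        def chunks(self):
            yield b'example image bytes'
    class _FakeInstance(object):
        image = _FakeImage()
    print(upload_path(_FakeInstance(), 'photo.jpg'))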
|
py | 1a486dfd9092760f22f143361e2fb52e067a3b17 | import os
import sys
import django
import logging
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
profile = os.environ.get('HELLOFAMILYCLUB', 'develop')
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'hellofamilyclub.settings.{}'.format(profile))
django.setup()
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from pictures.service.weibo import fetch_weibo_pictures
from pictures.service.recognize import recognize_all_pictures
from pictures.service.config import db_client
from news.service.helloproject_news import run_collect_hello_project_news
logging.basicConfig(filename='/Users/yuhao/log/job.log', filemode='a')
logging.getLogger('apscheduler').setLevel(logging.DEBUG)
jobstores = {
'mongo': MongoDBJobStore(collection='job', database='hellofamily',
client=db_client),
'default': MemoryJobStore()
}
executors = {
'default': ThreadPoolExecutor(20),
'processpool': ProcessPoolExecutor(5),
}
job_defaults = {
'coalesce': False,
'max_instances': 10,
}
scheduler = BlockingScheduler(jobstores=jobstores, executors=executors,
job_defaults=job_defaults)
scheduler.add_job(fetch_weibo_pictures, 'interval', hours=1,
replace_existing=True, id='fetch_weibo_pictures',
jobstore='mongo', max_instances=1)
scheduler.add_job(recognize_all_pictures, 'interval', hours=1,
replace_existing=True, id='recognize_all_pictures',
jobstore='mongo', max_instances=1)
scheduler.add_job(run_collect_hello_project_news, 'interval', hours=2,
replace_existing=True, id='collect_hello_project_news',
jobstore='mongo', max_instances=1)
scheduler.start()
|
py | 1a486e20b45e323dce963b1139d133ea8a2586d6 | import os
from appi2c.ext.database import db
def init_app(app):
app.config["SECRET_KEY"] = "appi2c_from_raspberry"
basedir = os.path.abspath(os.path.dirname('ext/database/'))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'database.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['FLASK_ADMIN_SWATCH'] = 'cosmo'
app.config["MAX_IMAGE_FILESIZE"] = 10 * 1024 * 1024
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPEG", "JPG", "PNG", "GIF"]
if app.debug:
app.config["DEBUG_TB_TEMPLATE_EDITOR_ENABLED"] = True
app.config["DEBUG_TB_PROFILER_ENABLED"] = True
|
py | 1a48701ac649dcd9545d56abec9f924d7683c9e5 | import abc
class LossFunction(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def compute_loss(self, batch, **kwargs):
pass
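# Illustrative concrete subclass added for this corpus: any loss only has to
# implement compute_loss(); the batch format below (a (predictions, targets)
# pair of equal-length sequences) is an assumption made for the example.
class MeanSquaredErrorLoss(LossFunction):
    def compute_loss(self, batch, **kwargs):
        predictions, targets = batch
        return sum((p - t) ** 2 for p, t in zip(predictions, targets)) / len(predictions)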
|
py | 1a4870209dbb61ad95702298378498efc8e78021 | import pathlib
import os
import unittest
from explainaboard import FileType, Source, TaskType, get_loader, get_processor
artifacts_path = os.path.dirname(pathlib.Path(__file__)) + "/artifacts/"
class TestTextPairClassification(unittest.TestCase):
def test_snli(self):
metadata = {"task_name": TaskType.text_classification.value,
"metric_names": ["Accuracy"]}
        path_data = artifacts_path + "test-snli.tsv"
loader = get_loader(TaskType.text_pair_classification, Source.local_filesystem, FileType.tsv, path_data)
data = loader.load()
processor = get_processor(TaskType.text_pair_classification, metadata, data)
self.assertEqual(len(processor._features), 8)
analysis = processor.process()
#analysis.to_memory()
# analysis.write_to_directory("./")
self.assertListEqual(analysis.metric_names, metadata["metric_names"])
# self.assertIsNotNone(analysis.results.fine_grained)
# self.assertGreater(len(analysis.results.overall), 0)
|
py | 1a4870d372eed090d088f45bd189f0605513dd55 | import numpy as np
import os
import sys
sys.path.append('mytorch')
from loss import *
from activation import *
from batchnorm import *
from linear import *
class MLP(object):
"""
A simple multilayer perceptron
"""
def __init__(self, input_size, output_size, hiddens, activations, weight_init_fn,
bias_init_fn, criterion, lr, momentum=0.0, num_bn_layers=0):
self.train_mode = True
self.num_bn_layers = num_bn_layers
self.bn = num_bn_layers > 0
self.nlayers = len(hiddens) + 1
self.input_size = input_size
self.output_size = output_size
self.activations = activations
self.criterion = criterion
self.lr = lr
self.momentum = momentum
if (len(hiddens) <= 0):
self.linear_layers = [Linear(input_size, output_size, weight_init_fn, bias_init_fn)]
else:
self.linear_layers = []
self.linear_layers.append(Linear(input_size, hiddens[0], weight_init_fn, bias_init_fn))
for i in range(1, len(hiddens)):
self.linear_layers.append(Linear(hiddens[i-1], hiddens[i], weight_init_fn, bias_init_fn))
self.linear_layers.append(Linear(hiddens[-1], output_size, weight_init_fn, bias_init_fn))
if self.bn:
self.bn_layers = [BatchNorm(hiddens[i]) for i in range(num_bn_layers)]
self.output = None
def forward(self, x):
"""
Argument:
x (np.array): (batch size, input_size)
Return:
out (np.array): (batch size, output_size)
"""
for i in range(len(self.linear_layers)):
x = self.linear_layers[i](x)
if i < self.num_bn_layers:
x = self.bn_layers[i](x, (not self.train_mode))
x = self.activations[i](x)
# x = self.activations[-1](x)
self.output = x
return x
def zero_grads(self):
for i in range(len(self.linear_layers)):
self.linear_layers[i].dW.fill(0.0)
def step(self):
for i in range(len(self.linear_layers)):
self.linear_layers[i].momentum_W = self.momentum * self.linear_layers[i].momentum_W - self.lr * self.linear_layers[i].dW
# print(self.linear_layers[i].dW)
self.linear_layers[i].W = self.linear_layers[i].W + self.linear_layers[i].momentum_W
self.linear_layers[i].momentum_b = self.momentum * self.linear_layers[i].momentum_b - self.lr * self.linear_layers[i].db
self.linear_layers[i].b = self.linear_layers[i].b + self.linear_layers[i].momentum_b
if self.bn:
for i in range(len(self.bn_layers)):
self.bn_layers[i].gamma = self.bn_layers[i].gamma - self.lr * self.bn_layers[i].dgamma
# self.bn_layers[i].gamma = self.bn_layers[i].gamma/np.sqrt(self.bn_layers[i].running_var + self.bn_layers[i].eps)
self.bn_layers[i].beta = self.bn_layers[i].beta - self.lr * self.bn_layers[i].dbeta
# self.bn_layers[i].beta = self.bn_layers[i].beta - self.bn_layers[i].gamma * self.bn_layers[i].running_mean
def backward(self, labels):
self.criterion.forward(self.output, labels)
grd = self.criterion.derivative()
# print(self.criterion.logsum)
for i in range(self.nlayers - 1, -1,-1):
grd = self.activations[i].derivative() * grd
# print(grd)
if self.bn and i < self.num_bn_layers:
grd = self.bn_layers[i].backward(grd)
grd = self.linear_layers[i].backward(grd)
return grd
def error(self, labels):
return (np.argmax(self.output, axis = 1) != np.argmax(labels, axis = 1)).sum()
def total_loss(self, labels):
return self.criterion(self.output, labels).sum()
def __call__(self, x):
return self.forward(x)
def train(self):
self.train_mode = True
def eval(self):
self.train_mode = False
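# Note added for this corpus (not in the original handout): step() applies
# classical momentum SGD, i.e. velocity <- momentum * velocity - lr * grad
# followed by W <- W + velocity. The same update rule, restated standalone:
def _momentum_sgd_update(param, grad, velocity, lr, momentum):
    velocity = momentum * velocity - lr * grad
    return param + velocity, velocity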
def get_training_stats(mlp, dset, nepochs, batch_size):
train, val, _ = dset
trainx, trainy = train
valx, valy = val
idxs = np.arange(len(trainx))
training_losses = np.zeros(nepochs)
training_errors = np.zeros(nepochs)
validation_losses = np.zeros(nepochs)
validation_errors = np.zeros(nepochs)
for e in range(nepochs):
print(e)
t_row= np.arange(trainx.shape[0])
np.random.shuffle(t_row)
trainx = trainx[t_row,:]
trainy = trainy[t_row,:]
# print(t_row == idxs)
# Per epoch setup ...
batchmean = []
batchtotal = []
for b in range(0, len(trainx), batch_size):
mlp.zero_grads()
mlp.forward(trainx[b:b+batch_size, :])
mlp.backward(trainy[b:b+batch_size, :])
batchtotal.append(mlp.total_loss(trainy[b:b+batch_size, :])/batch_size)
# print(type(mlp.total_loss(trainy[b:b+batch_size, :])))
batchmean.append(mlp.error(trainy[b:b+batch_size, :])/batch_size)
mlp.step()
valloss = []
valerror = []
for b in range(0, len(valx), batch_size):
            mlp.forward(valx[b:b+batch_size, :])
            valloss.append(mlp.total_loss(valy[b:b+batch_size, :])/batch_size)
            valerror.append(mlp.error(valy[b:b+batch_size, :])/batch_size)
training_errors[e] = np.array(batchmean).mean()
training_losses[e] = np.array(batchtotal).mean()
validation_errors[e] = np.array(valerror).mean()
validation_losses[e] = np.array(valloss).mean()
print(np.min(training_losses))
print(np.min(training_errors))
return (training_losses, training_errors, validation_losses, validation_errors)
|
py | 1a487166c2d49ad6737dc8e062c89844e9adaaea | from ._city_transformer_postscripts import CityTransformerPostscripts
from ._city_transformer_inverse_postscripts import CityTransformerInversePostscripts
def get_postscripts(name):
POST_SCRIPTS = {
'CityTransformer': CityTransformerPostscripts,
'CityTransformerInverse': CityTransformerInversePostscripts,
}
for n, p in POST_SCRIPTS.items():
if n.lower() == name.lower():
return p
raise ValueError(f'trainer {name} is not defined')
|
py | 1a487179ed78aa3dcca060f14d2629e6b97a7791 | def humanise_passage(book: str, start_chapter: str, start_verse: str, end_chapter: str, end_verse: str) -> str:
if len(start_chapter) == 0: # e.g. James
return book
if len(start_verse) == 0 and len(end_chapter) == 0 and len(end_verse) == 0: # e.g. Genesis 1
return f"{book} chapter {start_chapter}"
if len(start_verse) == 0 and len(end_verse) == 0: # e.g. Genesis 1 - 2
return f"{book} chapters {start_chapter} to {end_chapter}"
if len(start_verse) == 0: # e.g. Genesis 1 - 2:3
return f"{book} chapter {start_chapter} to chapter {end_chapter} verse {end_verse}"
if len(end_chapter) == 0 and len(end_verse) == 0: # e.g. Genesis 1:2
return f"{book} chapter {start_chapter} verse {start_verse}"
if len(end_chapter) == 0: # e.g. Genesis 1:2-3
return f"{book} chapter {start_chapter} verses {start_verse} to {end_verse}"
# e.g. Genesis 1:2 - 2:3
    return f"{book} chapter {start_chapter} verse {start_verse} to chapter {end_chapter} verse {end_verse}"
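# Illustrative examples added for this corpus: the expected strings follow
# directly from the branches above.
if __name__ == "__main__":
    assert humanise_passage("James", "", "", "", "") == "James"
    assert humanise_passage("Genesis", "1", "", "", "") == "Genesis chapter 1"
    assert humanise_passage("Genesis", "1", "", "2", "") == "Genesis chapters 1 to 2"
    assert humanise_passage("Genesis", "1", "", "2", "3") == "Genesis chapter 1 to chapter 2 verse 3"
    assert humanise_passage("Genesis", "1", "2", "", "") == "Genesis chapter 1 verse 2"
    assert humanise_passage("Genesis", "1", "2", "", "3") == "Genesis chapter 1 verses 2 to 3"
    assert humanise_passage("Genesis", "1", "2", "2", "3") == "Genesis chapter 1 verse 2 to chapter 2 verse 3"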
|
py | 1a4871e8f433d61bf1627458d57ae31d1f13ac23 | import math
from typing import Optional
import torch
from falkon.options import BaseOptions
import falkon
from falkon.mmv_ops.utils import _setup_opt, _get_cpu_ram
from falkon.sparse.sparse_tensor import SparseTensor
from falkon.utils.helpers import select_dim_over_d, sizeof_dtype, select_dim_over_m
from falkon.utils.tensor_helpers import create_same_stride
def fmmv_cpu_sparse(X1: SparseTensor,
X2: SparseTensor,
v: torch.Tensor,
kernel: 'falkon.kernels.Kernel',
out: Optional[torch.Tensor],
opt: BaseOptions):
opt = _setup_opt(opt, is_cpu=True)
dtype = X1.dtype
ntot, dtot = X1.size()
mtot, T = v.size()
# Create output matrix
if out is None:
out = torch.empty(ntot, T, dtype=dtype)
out.fill_(0.0)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# Narrowing X1, X2: n + m
# Prepare - not computable, depends on kernel
# ker_chunk : n*m
# finalize : 0 (if can be implemented in place, kernel-dependent)
n, m = select_dim_over_m(
maxM=mtot, maxN=ntot,
coef_nm=1, coef_n=1, coef_m=1, tot=avail_mem)
ker_chunk = create_same_stride((n, m), out, dtype, device='cpu')
for i in range(0, ntot, n):
ic = min(n, ntot - i)
cur_out = out[i:i + ic, :]
X1_chunk = X1.narrow_rows(i, ic)
for j in range(0, mtot, m):
jc = min(m, mtot - j)
X2_chunk = X2.narrow_rows(j, jc)
cur_ker_chunk = ker_chunk[:ic, :jc]
cur_ker_chunk.fill_(0.0)
ddd = kernel._prepare_sparse(X1_chunk, X2_chunk)
kernel._apply_sparse(X1_chunk, X2_chunk.transpose_csc(), cur_ker_chunk)
kernel._finalize(cur_ker_chunk, ddd)
# Multiply by the vector v
cur_out.addmm_(cur_ker_chunk, v.narrow(0, j, jc))
return out
def fmmv_cpu(X1, X2, v, kernel, out, opt):
"""Blockwise kernel-vector product
This function computes ``kernel(X1, X2) @ v`` in a blockwise fashion, to avoid having the
whole N*M kernel matrix in memory at once.
Note that while the principle is that of matrix-vector product, `v` can have more than
one column.
Parameters
-----------
X1
[N, D] array
X2
[M, D] array
v
[M, T] array
kernel
Class representing the desired kernel function
out : torch.Tensor or None
[N, T] array for storing the kernel-vector product output.
If None, will be allocated within the function.
opt
Basic options dictionary, used for determining available memory.
"""
opt = _setup_opt(opt, is_cpu=True)
ntot, dtot = X1.size(0), X1.size(1)
M, T = v.size()
dtype = v.dtype
# Create output matrix
if out is None:
out = torch.empty(ntot, T, dtype=dtype)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# Only necessary memory allocation is that for the temporary kernel
# `temp_out` of size n*M
n, d = select_dim_over_d(
maxD=dtot, maxN=ntot,
coef_nd=0, coef_n=M, coef_d=0, rest=0, tot=avail_mem)
# Run batched matrix multiplication
for i in range(0, ntot, n):
ic = min(n, ntot - i)
ddd = kernel._prepare(X1.narrow(0, i, ic), X2) # , v=v)
temp_out = torch.zeros(ic, M, dtype=dtype)
for k in range(0, dtot, d):
kc = min(d, dtot - k)
X1d = X1[i: i + ic, k: k + kc]
X2d = X2[:, k: k + kc]
kernel._apply(X1d, X2d.T, temp_out)
# temp_out = fnc(X1*X2', X1, X2)
kernel._finalize(temp_out, ddd)
torch.mm(temp_out, v, out=out[i: i + ic, :])
return out
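# Illustrative sketch added for this corpus (not part of Falkon): the blockwise
# principle used by fmmv_cpu, shown with a plain Gaussian kernel on dense torch
# tensors. Row blocks of kernel(X1, X2) are materialised one at a time and
# multiplied by v, so the full N x M kernel matrix never exists in memory at once.
def _naive_blockwise_gaussian_mv(X1, X2, v, sigma=1.0, block_rows=64):
    out = torch.empty(X1.shape[0], v.shape[1], dtype=v.dtype)
    for i in range(0, X1.shape[0], block_rows):
        X1_blk = X1[i:i + block_rows]
        sq_dists = torch.cdist(X1_blk, X2) ** 2          # pairwise squared distances for this block
        K_blk = torch.exp(-sq_dists / (2 * sigma ** 2))  # block of the Gaussian kernel matrix
        out[i:i + block_rows] = K_blk @ v                # block of the kernel-vector product
    return out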
def fdmmv_cpu(X1, X2, v, w, kernel, out, opt):
"""Calculate a double kernel-vector product.
This function computes the following quantity: ``kernel(X1, X2).T @ (kernel(X1, X2) @ v + w)``
Where one of `v` or `w` can be empty.
All arrays passed to this function must be 2-dimensional, although
the second dimension can be unitary.
The expression is not computed directly. We separate the computation
into smaller blocks so as to reduce the total memory consumption (the
large N*M kernel matrix is never wholly stored in RAM.)
Parameters
-----------
X1
[N, D] array
X2
[M, D] array
v : torch.Tensor or None
[M, T] array. But note that at least one of v or w must be specified.
w : torch.Tensor or None
[N, T] array. But note that at least one of v or w must be specified.
kernel
Class representing the desired kernel function
out : torch.Tensor or None
[M, T] array for storing the kernel-vector product output.
If None, will be allocated within the function.
opt
Basic options dictionary, used for determining available memory.
"""
opt = _setup_opt(opt, is_cpu=True)
# Parameter validation
if v is None and w is None:
raise ValueError("One of v and w must be specified to run fMMV.")
T = v.shape[1] if v is not None else w.shape[1]
ntot, dtot = X1.size()
M = X2.size(0)
dtype = X1.dtype
# Create output matrix
if out is None:
out = torch.empty(M, T, dtype=dtype)
out.fill_(0)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
    # The only necessary temporary matrices are `temp_out` of size n*M and
    # `w_blk` of size n*T
n, d = select_dim_over_d(
maxD=dtot, maxN=ntot,
coef_nd=0, coef_n=M + T, coef_d=0, rest=0, tot=avail_mem)
# Run Batched Matrix Computation
for i in range(0, ntot, n):
ic = min(n, ntot - i)
ddd = kernel._prepare(X1[i: i + ic, :], X2)
temp_out = torch.zeros(ic, M, dtype=dtype)
for k in range(0, dtot, d):
kc = min(d, dtot - k)
X1d = X1[i: i + ic, k: k + kc]
X2d = X2[:, k: k + kc]
kernel._apply(X1d, X2d.T, temp_out)
kernel._finalize(temp_out, ddd) # fnc(X1*X2', X1, X2) [n x M]
w_blk = torch.zeros(ic, T, dtype=dtype) # n x T
if w is not None:
w_blk.copy_(w[i: i + ic, :])
if v is not None:
# w_blk + c_out * v => (n x T) + (n x M)*(M x T)
w_blk.addmm_(temp_out, v)
out.add_(torch.mm(temp_out.T, w_blk))
return out
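# Illustrative only (not part of the original module): the dense expression that
# fdmmv_cpu evaluates blockwise, i.e. K.T @ (K @ v + w) with K = kernel(X1, X2).
# The helper name is ours; it assumes the same _prepare/_apply/_finalize interface.
def _fdmmv_cpu_dense_reference(X1, X2, v, w, kernel):
    full_ker = torch.zeros(X1.size(0), X2.size(0), dtype=X1.dtype)
    ddd = kernel._prepare(X1, X2)
    kernel._apply(X1, X2.T, full_ker)
    kernel._finalize(full_ker, ddd)
    inner = full_ker @ v if v is not None else torch.zeros(X1.size(0), w.size(1), dtype=X1.dtype)
    if w is not None:
        inner = inner + w
    return full_ker.T @ inner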
def fdmmv_cpu_sparse(X1: SparseTensor,
X2: SparseTensor,
v: Optional[torch.Tensor],
w: Optional[torch.Tensor],
kernel,
out: Optional[torch.Tensor] = None,
opt: Optional[BaseOptions] = None):
opt = _setup_opt(opt, is_cpu=True)
# Parameter validation
if v is None and w is None:
raise ValueError("One of v and w must be specified to run fMMV.")
T = v.size(1) if v is not None else w.size(1)
ntot, dtot = X1.size()
M = X2.size(0)
dtype = X1.dtype
# Create output matrix
if out is None:
out = torch.empty(M, T, dtype=dtype)
out.fill_(0)
avail_mem = _get_cpu_ram(opt, 0.95) / sizeof_dtype(dtype)
# Narrow X1 : n
# ker_chunk : n*M
# w_blk : n*T
    n = avail_mem / (M + T + 1)
n = int(math.floor(n))
if n < 1:
raise MemoryError(("Available memory %.2fGB is insufficient "
"for blockwise fdMMv.") % (avail_mem * sizeof_dtype(dtype) / 2**30))
# Allocate fixed arrays
ker_chunk = create_same_stride((n, M), out, dtype, device='cpu')
w_blk = create_same_stride((n, T), out, dtype, device='cpu')
# Run blocked fdmmv
for i in range(0, ntot, n):
ic = min(n, ntot - i)
X1_chunk = X1.narrow_rows(i, ic)
cur_ker_chunk = ker_chunk[:ic]
cur_ker_chunk.fill_(0.0)
ddd = kernel._prepare_sparse(X1_chunk, X2)
kernel._apply_sparse(X1_chunk, X2.transpose_csc(), cur_ker_chunk)
kernel._finalize(cur_ker_chunk, ddd)
# Multiply by the vector v
cur_w_blk = w_blk[:ic] # n x T
cur_w_blk.fill_(0.0)
if w is not None:
cur_w_blk.copy_(w[i: i + ic, :])
if v is not None:
# w_blk + c_out * v => (n x T) + (n x M)*(M x T)
cur_w_blk.addmm_(cur_ker_chunk, v)
out.addmm_(cur_ker_chunk.T, cur_w_blk)
del ker_chunk, w_blk
return out
|
py | 1a48723d584e308175ccf2a05cf504246d9b89b7 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
# TODO(jbudorick): Split these constants into coherent modules.
# pylint: disable=W0212
import collections
import logging
import os
import subprocess
import devil.android.sdk.keyevent
from devil.android.sdk import version_codes
from devil.constants import exit_codes
keyevent = devil.android.sdk.keyevent
DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir)))
PackageInfo = collections.namedtuple('PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket',
'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
'legacy_browser': PackageInfo(
'com.google.android.browser',
'com.android.browser.BrowserActivity',
None,
None,
None),
'chromecast_shell': PackageInfo(
'com.google.android.apps.mediashell',
'com.google.android.apps.mediashell.MediaShellActivity',
'/data/local/tmp/castshell-command-line',
None,
None),
'content_shell': PackageInfo(
'org.chromium.content_shell_apk',
'org.chromium.content_shell_apk.ContentShellActivity',
'/data/local/tmp/content-shell-command-line',
None,
'org.chromium.content_shell_apk.tests'),
'android_webview_shell': PackageInfo(
'org.chromium.android_webview.shell',
'org.chromium.android_webview.shell.AwShellActivity',
'/data/local/tmp/android-webview-command-line',
None,
'org.chromium.android_webview.test'),
'gtest': PackageInfo(
'org.chromium.native_test',
'org.chromium.native_test.NativeUnitTestActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'components_browsertests': PackageInfo(
'org.chromium.components_browsertests_apk',
('org.chromium.components_browsertests_apk' +
'.ComponentsBrowserTestsActivity'),
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'content_browsertests': PackageInfo(
'org.chromium.content_browsertests_apk',
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'chromedriver_webview_shell': PackageInfo(
'org.chromium.chromedriver_webview_shell',
'org.chromium.chromedriver_webview_shell.Main',
None,
None,
None),
}
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
TEST_POLICY_SERVER_PORT = 9051
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = (
'/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
ANDROID_SDK_VERSION = version_codes.MARSHMALLOW
ANDROID_SDK_BUILD_TOOLS_VERSION = '23.0.1'
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party', 'android_tools', 'sdk')
ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party', 'android_tools', 'ndk')
PROGUARD_SCRIPT_PATH = os.path.join(
ANDROID_SDK_ROOT, 'tools', 'proguard', 'bin', 'proguard.sh')
PROGUARD_ROOT = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'proguard')
BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
os.environ.get('CHROMIUM_OUT_DIR', 'out'),
'bad_devices.json')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
# TODO(jbudorick): Remove once unused.
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
# TODO(jbudorick): Rework this into testing/buildbot/
PYTHON_UNIT_TEST_SUITES = {
'pylib_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
'test_modules': [
'devil.android.device_utils_test',
'devil.android.md5sum_test',
'devil.utils.cmd_helper_test',
'pylib.results.json_results_test',
'pylib.utils.proguard_test',
]
},
'gyp_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
'test_modules': [
'java_cpp_enum_tests',
'java_google_api_keys_tests',
]
},
}
LOCAL_MACHINE_TESTS = ['junit', 'python']
VALID_ENVIRONMENTS = ['local', 'remote_device']
VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
'perf', 'python', 'uirobot']
VALID_DEVICE_TYPES = ['Android', 'iOS']
def GetBuildType():
try:
return os.environ['BUILDTYPE']
except KeyError:
raise EnvironmentError(
'The BUILDTYPE environment variable has not been set')
def SetBuildType(build_type):
os.environ['BUILDTYPE'] = build_type
def SetBuildDirectory(build_directory):
os.environ['CHROMIUM_OUT_DIR'] = build_directory
def SetOutputDirectory(output_directory):
os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
def GetOutDirectory(build_type=None):
"""Returns the out directory where the output binaries are built.
Args:
build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
globally set build type environment variable BUILDTYPE.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
GetBuildType() if build_type is None else build_type))
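# Example of the precedence above (paths are illustrative, not required values):
#   CHROMIUM_OUTPUT_DIR=/work/src/out/Release-arm -> that path is used directly.
#   Otherwise CHROMIUM_OUT_DIR (default 'out') and the build type (e.g. 'Debug')
#   combine to <DIR_SOURCE_ROOT>/out/Debug.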
# TODO(jbudorick): Convert existing callers to AdbWrapper.GetAdbPath() and
# remove this.
def GetAdbPath():
from devil.android.sdk import adb_wrapper
return adb_wrapper.AdbWrapper.GetAdbPath()
# Exit codes
ERROR_EXIT_CODE = exit_codes.ERROR
INFRA_EXIT_CODE = exit_codes.INFRA
WARNING_EXIT_CODE = exit_codes.WARNING
|
py | 1a48727d042ee63a5537b4038ce84d6bfbddefc8 | """For admin view."""
import logging
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import REDIRECT_FIELD_NAME
# from django.contrib.auth.views import password_change
from django.contrib import messages
from cpovc_access.forms import StrictPasswordChangeForm
from cpovc_access.models import AccessLog, AccessAttempt
from cpovc_access.models import PasswordChange, UserChange
logger = logging.getLogger(__name__)
def unlock_user(modeladmin, request, queryset):
"""
    Admin action that resets the failed-login counter of the selected
    AccessAttempt record(s) so the affected user(s) can log in again.
"""
# model = qs.model
queryset.update(failures_since_start=0)
message = ('User(s) failed login counts reset to 0. '
'User(s) can now log in.')
messages.info(request, message)
unlock_user.short_description = u"Unlock selected user(s)"
class AccessAttemptAdmin(admin.ModelAdmin):
"""Class for handling attempts."""
list_display = (
'attempt_time',
'ip_address',
'user_agent',
'username',
'path_info',
'failures_since_start',
)
list_filter = [
'attempt_time',
'ip_address',
'username',
'path_info',
]
search_fields = [
'ip_address',
'username',
'user_agent',
'path_info',
]
date_hierarchy = 'attempt_time'
fieldsets = (
(None, {
'fields': ('path_info', 'failures_since_start')
}),
('Form Data', {
'fields': ('get_data', 'post_data')
}),
('Meta Data', {
'fields': ('user_agent', 'ip_address', 'http_accept')
})
)
actions = [unlock_user]
admin.site.register(AccessAttempt, AccessAttemptAdmin)
class AccessLogAdmin(admin.ModelAdmin):
"""Class for handling access logs."""
list_display = (
'attempt_time',
'logout_time',
'ip_address',
'username',
'user_agent',
'path_info',
)
list_filter = [
'attempt_time',
'logout_time',
'ip_address',
'username',
'path_info',
]
search_fields = [
'ip_address',
'user_agent',
'username',
'path_info',
]
date_hierarchy = 'attempt_time'
fieldsets = (
(None, {
'fields': ('path_info',)
}),
('Meta Data', {
'fields': ('user_agent', 'ip_address', 'http_accept')
})
)
admin.site.register(AccessLog, AccessLogAdmin)
def admin_login(request, extra_context=None):
"""Redirect to default login view which enforces auth policy."""
next_page = request.get_full_path()
next_url = next_page.split('=')[1] if '=' in next_page else next_page
q = REDIRECT_FIELD_NAME + '=' + next_url
return HttpResponseRedirect(reverse('login') + '?' + q)
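# For example (URL values are illustrative): a request to the admin login view
# carrying ?next=/admin/foo/ is redirected to reverse('login') + '?next=/admin/foo/',
# so the project's main login view (and its auth policies) always handles admin access.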
admin.site.login = admin_login
def admin_logout(request, extra_context=None):
"""Redirect to default login page and not /admin area."""
return HttpResponseRedirect(reverse('login'))
admin.site.logout = admin_logout
class PasswordChangeAdmin(admin.ModelAdmin):
"""Class to handle password change."""
readonly_fields = ('user', 'timestamp', 'successful', 'is_temporary')
fields = ('user', 'timestamp', 'successful', 'is_temporary')
list_display = ('user', 'successful', 'is_temporary', 'timestamp')
list_filter = ('successful', 'is_temporary')
date_hierarchy = 'timestamp'
def has_add_permission(self, request):
"""Method to handle add permissions."""
return False
def has_delete_permission(self, request, obj=None):
"""Method to handle delete permission."""
return False
def save_model(self, request, obj, form, change):
"""Do not actually save anything to prevent changes."""
logger.info('Prevented change in PasswordChange item by user %s',
request.user)
def get_actions(self, request):
"""Disable deletion of user changes action."""
actions = super(PasswordChangeAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
admin.site.register(PasswordChange, PasswordChangeAdmin)
class UserChangeAdmin(admin.ModelAdmin):
"""Class to handle user changes."""
readonly_fields = ('user', 'timestamp', 'by_user')
fields = ('user', 'timestamp', 'by_user')
list_display = ('user', 'by_user', 'timestamp')
date_hierarchy = 'timestamp'
def has_add_permission(self, request):
"""Method to handle add permission."""
return False
def has_delete_permission(self, request, obj=None):
"""Method to handle delete permission."""
return False
def save_model(self, request, obj, form, change):
"""Do not actually save anything to prevent changes."""
logger.info('Prevented change in UserChange item by user %s',
request.user)
def get_actions(self, request):
"""Disable deletion of user changes action."""
actions = super(UserChangeAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
admin.site.register(UserChange, UserChangeAdmin)
def admin_password_change(request):
"""Handle the "change password" task - both display and validation."""
to_url = reverse('admin:password_change_done', current_app=admin.site.name)
defaults = {
'current_app': admin.site.name,
'post_change_redirect': to_url,
'password_change_form': StrictPasswordChangeForm
}
if admin.site.password_change_template is not None:
defaults['template_name'] = admin.site.password_change_template
return password_change(request, **defaults)
admin.site.password_change = admin_password_change
|
py | 1a487390c379cc75bebb0959ba2870b612505c78 | # Note: Before running this from your laptop, you must run "ray attach cluster.yaml -p 8000" to set up a port-forward from the laptop's port 8000 to the cluster's internal port 8000
# The other option is to use "ray submit" to run this on the cluster as-is without a port-forward
import requests
input_text_list = ["Ray Serve is great!", "Serving frameworks without DAG support are not great."]
for input_text in input_text_list:
prediction = requests.get("http://127.0.0.1:8000/invocations", data=input_text).text
print("Average prediction for '{}' is {}".format(input_text, prediction))
|
py | 1a4873d18b978c223f336033c07c753205ebbea9 | class Example:
    'Common base class for all employees'
def printValue(self):
print(Example.__doc__)
print(Example.__name__)
print(Example.__dict__)
print(Example.__module__)
print(Example.__bases__)
example = Example()
example.printValue()
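# Rough expected output when run as a script (exact formatting varies by Python
# version; on Python 2 an old-style class reports empty __bases__):
#   __doc__    -> 'Common base class for all employees'
#   __name__   -> 'Example'
#   __dict__   -> mapping containing 'printValue' and '__doc__'
#   __module__ -> '__main__'
#   __bases__  -> (<class 'object'>,) on Python 3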
|
py | 1a48740c2a86902ff1b4d5b708717dc6d1073b3c | #!/usr/bin/env python
"""__init__ file for interator module."""
from .interator import (prime_stream, composite_stream, polygonal_stream,
fibonacci_stream, negafibonacci_stream, lucas_stream,
is_prime, miller_rabin, is_composite, is_polygonal,
is_fibonacci, is_lucas, nth_fibonacci)
|
py | 1a4874276c6443e6cc4cdc2ef2ee99961e59d5ae | import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME
PROJECT_NAME = 'project_settings_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
gl = get_gitlab()
def fin():
gl.delete_project(GROUP_AND_PROJECT_NAME)
request.addfinalizer(fin)
return gl # provide fixture value
config_builds_for_private_projects = """
gitlab:
api_version: 4
project_settings:
project_settings:
builds_access_level: private
visibility: private
"""
class TestProjectSettings:
def test__builds_for_private_projects(self, gitlab):
gf = GitLabForm(config_string=config_builds_for_private_projects,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
settings = gitlab.get_project_settings(GROUP_AND_PROJECT_NAME)
assert settings['visibility'] == 'private'
# there is no such field in the "Get single project" API :/
#assert settings['builds_access_level'] is 'private'
|
py | 1a4875173394465d869f15e59a7c55195ec98922 | # -*- coding: utf-8 -*-
import json
import logging
import requests
from openerp import http
from openerp.http import request
_logger = logging.getLogger(__name__)
def s2human(time):
"""Convert a time in second into an human readable string"""
for delay, desc in [(86400,'d'),(3600,'h'),(60,'m')]:
if time >= delay:
return str(int(time / delay)) + desc
return str(int(time)) + "s"
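# For instance, s2human(86500) -> '1d', s2human(3700) -> '1h', s2human(90) -> '1m'
# and s2human(45) -> '45s'; only the largest matching unit is reported.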
class RunbotButtons(http.Controller):
def build_info(self, build):
real_build = build.duplicate_id if build.state == 'duplicate' else build
return {
'id': build.id,
'name': build.name,
'state': real_build.state,
'result': real_build.result,
'subject': build.subject,
'author': build.author,
'committer': build.committer,
'dest': build.dest,
'real_dest': real_build.dest,
'job_age': s2human(real_build.job_age),
'job_time': s2human(real_build.job_time),
'job': real_build.job,
'domain': real_build.domain,
'host': real_build.host,
'port': real_build.port,
'server_match': real_build.server_match,
}
def build_html(self, build):
res = []
try:
url = 'http://%s/instance_introspection.json' % build.domain
response = requests.get(url,
timeout=5.00)
if response.status_code == requests.codes.ok:
res = response.json()
except requests.exceptions.Timeout:
res = [{'info': {'error': 'Timeout',
'message': '''Instance is not running
https://github.com/Vauxoo/server-tools/tree/8.0/instance_introspection
                             read the help to learn how to configure it properly'''}}]
except requests.exceptions.TooManyRedirects:
            res = [{'info': {'error': 'Too many redirects',
                             'message': '''Install the instance_introspection module properly:
                             https://github.com/Vauxoo/server-tools/tree/8.0/instance_introspection
                             read the help to learn how'''}}]
except requests.exceptions.RequestException as e:
res = [{'info': {'error': 'Unknown Error',
'message': '''%s''' % e.message}}]
            # Catastrophic error: log it and return the error payload built above.
            _logger.error(e)
return res
@http.route(['/vauxooci/build_button/<build_id>'], type='http', auth="public", website=True)
def build(self, build_id=None, search=None, **post):
registry, cr, uid, context = request.registry, request.cr, request.uid, request.context
Build = registry['runbot.build']
build = Build.browse(cr, uid, [int(build_id)])[0]
if not build.exists():
return request.not_found()
context = {
'introspection': build.introspection,
'introspection_html': self.build_html(build),
'repo': build.repo_id,
'bu': self.build_info(build),
'br': {'branch': build.branch_id},
}
return request.render("vauxooci.build_button", context)
# @http.route('/runbot_frontend/runbot_frontend/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('runbot_frontend.listing', {
# 'root': '/runbot_frontend/runbot_frontend',
# 'objects': http.request.env['runbot_frontend.runbot_frontend'].search([]),
# })
# @http.route('/runbot_frontend/runbot_frontend/objects/<model("runbot_frontend.runbot_frontend"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('runbot_frontend.object', {
# 'object': obj
# })
|
py | 1a4875c7e275ab1b82b4482a519f0c45fff86c63 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 12); |
py | 1a4875e8989add16dadb5d301c5c087696e6e060 | import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="heatmap.colorbar", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
py | 1a48779e0fef07633644c30b73bcce6399120a38 | # Generated by Django 2.0.1 on 2018-12-11 16:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("organisations", "0002_organisation_duplicate_of"),
]
operations = [
migrations.AlterUniqueTogether(name="organisation", unique_together=set(),),
]
|
py | 1a4877bd63c24842fa6ff28e142e04b167d80b36 | # -*- coding:utf-8 -*-
#
# Original Author:
# Twitter OAuth Sample Script
# * techno - Hirotaka Kawata
# * http://techno-st.net/
#
import time, random
import urllib, urllib2
import hmac, hashlib
import cgi
from pit import Pit
oauth_keys = Pit.get('pytwtrmgmttool')
ckey = oauth_keys['consumer_key']
csecret = oauth_keys['consumer_secret']
atoken = ""
atoken_secret = ""
def make_signature(params, url, method, csecret, secret = ""):
# Generate Signature Base String
plist = []
for i in sorted(params):
plist.append("%s=%s" % (i, params[i]))
pstr = "&".join(plist)
msg = "%s&%s&%s" % (method, urllib.quote(url, ""),
urllib.quote(pstr, ""))
# Calculate Signature
h = hmac.new("%s&%s" % (csecret, secret), msg, hashlib.sha1)
sig = h.digest().encode("base64").strip()
return sig
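# Sketch of the signature base string assembled above (values abbreviated):
#   GET&http%3A%2F%2Ftwitter.com%2Foauth%2Frequest_token&oauth_consumer_key%3D...%26oauth_nonce%3D...
# i.e. METHOD & urlencode(URL) & urlencode(sorted "k=v" pairs joined by "&"),
# signed with HMAC-SHA1 using "<consumer_secret>&<token_secret>" as the key.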
def init_params():
p = {
"oauth_consumer_key": ckey,
"oauth_signature_method": "HMAC-SHA1",
"oauth_timestamp": str(int(time.time())),
"oauth_nonce": str(random.getrandbits(64)),
"oauth_version": "1.0"
}
return p
def oauth_header(params):
plist = []
for p in params:
plist.append('%s="%s"' % (p, urllib.quote(params[p])))
return "OAuth %s" % (", ".join(plist))
# Request Token URL
reqt_url = 'http://twitter.com/oauth/request_token'
# Authorize URL
auth_url = 'http://twitter.com/oauth/authorize'
# Access Token URL
acct_url = 'http://twitter.com/oauth/access_token'
# status update URL
post_url = 'http://twitter.com/statuses/update.json'
if not atoken and not atoken_secret:
# Request Parameters
params = init_params()
print "Get request token:",
# Generate Signature
sig = make_signature(params, reqt_url, "GET", csecret)
params["oauth_signature"] = sig
# Get Token
req = urllib2.Request("%s?%s" % (reqt_url, urllib.urlencode(params)))
resp = urllib2.urlopen(req)
print "\t[OK]"
# Parse Token Parameters
ret = cgi.parse_qs(resp.read())
token = ret["oauth_token"][0]
token_secret = ret["oauth_token_secret"][0]
# Get PIN
print "* Please access to this URL, and allow."
print "> %s?%s=%s" % (auth_url, "oauth_token", token)
print "\n* After that, will display 7 digit PIN, input here."
print "PIN ->",
pin = raw_input()
pin = int(pin)
print "Get access token:",
# Generate Access Token Request
params = init_params()
params["oauth_verifier"] = pin
params["oauth_token"] = token
sig = make_signature(params, acct_url, "GET", csecret, token_secret)
params["oauth_signature"] = sig
# Get Access Token
req = urllib2.Request("%s?%s" % (acct_url, urllib.urlencode(params)))
resp = urllib2.urlopen(req)
print "\t[OK]"
# Parse Access Token
fin = cgi.parse_qs(resp.read())
atoken = fin["oauth_token"][0]
atoken_secret = fin["oauth_token_secret"][0]
print "Access Token: %s" % atoken
print "Access Token Secret: %s" % atoken_secret
print "Your screen_name is '%s'." % fin["screen_name"][0]
# Update Status by OAuth Authorization
print "What are you doing?:",
post = raw_input()
params = init_params()
params["oauth_token"] = atoken
params["status"] = urllib.quote(post, "")
sig = make_signature(params, post_url, "POST", csecret, atoken_secret)
params["oauth_signature"] = sig
del params["status"]
req = urllib2.Request(post_url)
req.add_data("status=%s" % urllib.quote(post, ""))
req.add_header("Authorization", oauth_header(params))
|
py | 1a48781e24336b88f206198c75f21f89f4cdaa0b | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Bring Your Own Datatypes to TVM
===============================
**Authors**: `Gus Smith <https://github.com/gussmith23>`_, `Andrew Liu <https://github.com/hypercubestart>`_
In this tutorial, we will show you how to utilize the Bring Your Own Datatypes framework to use your own custom datatypes in TVM.
Note that the Bring Your Own Datatypes framework currently only handles **software emulated versions of datatypes**.
The framework does not support compiling for custom accelerator datatypes out-of-the-box.
Datatype Libraries
------------------
The Bring Your Own Datatypes framework allows users to register their own datatype implementations alongside TVM's native datatypes (such as ``float``).
In the wild, these datatype implementations often appear as libraries.
For example:
- `libposit <https://github.com/cjdelisle/libposit>`_, a posit library
- `Stillwater Universal <https://github.com/stillwater-sc/universal>`_, a library with posits, fixed-point numbers, and other types
- `SoftFloat <https://github.com/ucb-bar/berkeley-softfloat-3>`_, Berkeley's software implementation of IEEE 754 floating-point
The Bring Your Own Datatypes framework enables users to plug these datatype implementations into TVM!
In this section, we will use an example library we have already implemented, located at ``3rdparty/byodt/myfloat.cc``.
This datatype, which we dubbed "myfloat", is really just an IEEE 754 float under the hood, but it serves as a useful example
to show that any datatype can be used in the BYODT framework.
Setup
-----
Since we do not use any 3rdparty library, there is no setup needed.
If you would like to try this with your own datatype library, first bring the library's functions into the process space with ``CDLL``:
.. code-block :: python
ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
"""
######################
# A Simple TVM Program
# --------------------
#
# We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes.
import tvm
from tvm import relay
# Our basic program: Z = X + Y
x = relay.var("x", shape=(3,), dtype="float32")
y = relay.var("y", shape=(3,), dtype="float32")
z = x + y
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
######################################################################
# Now, we create random inputs to feed into this program using numpy:
import numpy as np
np.random.seed(23) # for reproducibility
x_input = np.random.rand(3).astype("float32")
y_input = np.random.rand(3).astype("float32")
print("x: {}".format(x_input))
print("y: {}".format(y_input))
######################################################################
# Finally, we're ready to run the program:
z_output = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output))
######################################################################
# Adding Custom Datatypes
# -----------------------
# Now, we will do the same, but we will use a custom datatype for our intermediate computation.
#
# We use the same input variables ``x`` and ``y`` as above, but before adding ``x + y``, we first cast both ``x`` and ``y`` to a custom datatype via the ``relay.cast(...)`` call.
#
# Note how we specify the custom datatype: we indicate it using the special ``custom[...]`` syntax.
# Additionally, note the "32" after the datatype: this is the bitwidth of the custom datatype. This tells TVM that each instance of ``myfloat`` is 32 bits wide.
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# Trying to generate this program throws an error from TVM.
# TVM does not know how to handle any custom datatype out of the box!
# We first have to register the custom type with TVM, giving it a name and a type code:
tvm.target.datatype.register("myfloat", 150)
######################################################################
# Note that the type code, 150, is currently chosen manually by the user.
# See ``TVMTypeCode::kCustomBegin`` in `include/tvm/runtime/c_runtime_api.h <https://github.com/apache/tvm/blob/main/include/tvm/runtime/data_type.h>`_.
# Now we can generate our program again:
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
module = relay.transform.InferType()(module)
######################################################################
# Now we have a Relay program that uses myfloat!
print(program)
######################################################################
# Now that we can express our program without errors, let's try running it!
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
print("z: {}".format(y_myfloat))
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# Now, trying to compile this program throws an error.
# Let's dissect this error.
#
# The error is occurring during the process of lowering the custom datatype code to code that TVM can compile and run.
# TVM is telling us that it cannot find a *lowering function* for the ``Cast`` operation, when casting from source type 2 (``float``, in TVM), to destination type 150 (our custom datatype).
# When lowering custom datatypes, if TVM encounters an operation over a custom datatype, it looks for a user-registered *lowering function*, which tells it how to lower the operation to an operation over datatypes it understands.
# We have not told TVM how to lower ``Cast`` operations for our custom datatypes; thus, the source of this error.
#
# To fix this error, we simply need to specify a lowering function:
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func(
{
(32, 32): "FloatToCustom32", # cast from float32 to myfloat32
}
),
"Cast",
"llvm",
"float",
"myfloat",
)
######################################################################
# The ``register_op(...)`` call takes a lowering function, and a number of parameters which specify exactly the operation which should be lowered with the provided lowering function.
# In this case, the arguments we pass specify that this lowering function is for lowering a ``Cast`` from ``float`` to ``myfloat`` for target ``"llvm"``.
#
# The lowering function passed into this call is very general: it should take an operation of the specified type (in this case, `Cast`) and return another operation which only uses datatypes which TVM understands.
#
# In the general case, we expect users to implement operations over their custom datatypes using calls to an external library.
# In our example, our ``myfloat`` library implements a ``Cast`` from ``float`` to 32-bit ``myfloat`` in the function ``FloatToCustom32``.
# To provide for the general case, we have made a helper function, ``create_lower_func(...)``,
# which does just this: given a dictionary, it replaces the given operation with a ``Call`` to the appropriate function name provided based on the op and the bit widths.
# It additionally removes usages of the custom datatype by storing the custom datatype in an opaque ``uint`` of the appropriate width; in our case, a ``uint32_t``.
# For more information, see `the source code <https://github.com/apache/tvm/blob/main/python/tvm/target/datatype.py>`_.
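# Roughly speaking (a sketch of the rewrite, not the exact TIR that TVM emits),
# the lowering registered above turns
#     Cast(custom[myfloat]32, x: float32)
# into something like
#     reinterpret-as-uint32(call_pure_extern("FloatToCustom32", x))
# so that the rest of the pipeline only ever sees ordinary 32-bit integers.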
# We can now re-try running the program:
try:
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
except tvm.TVMError as e:
# Print last line of error
print(str(e).split("\n")[-1])
######################################################################
# This new error tells us that the ``Add`` lowering function is not found, which is good news, as it's no longer complaining about the ``Cast``!
# We know what to do from here: we just need to register the lowering functions for the other operations in our program.
#
# Note that for ``Add``, ``create_lower_func`` takes in a dict where the key is an integer.
# For ``Cast`` operations, we require a 2-tuple to specify the ``src_bit_length`` and the ``dest_bit_length``,
# while for all other operations, the bit length is the same between the operands so we only require one integer to specify ``bit_length``.
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Add"}),
"Add",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({(32, 32): "Custom32ToFloat"}),
"Cast",
"llvm",
"myfloat",
"float",
)
# Now, we can run our program without errors.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
z_output_myfloat = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
print("x:\t\t{}".format(x_input))
print("y:\t\t{}".format(y_input))
print("z (float32):\t{}".format(z_output))
print("z (myfloat32):\t{}".format(z_output_myfloat))
# Perhaps as expected, the ``myfloat32`` and ``float32`` results are exactly the same!
######################################################################
# Running Models With Custom Datatypes
# ------------------------------------
#
# We will first choose the model which we would like to run with myfloat.
# In this case we use `Mobilenet <https://arxiv.org/abs/1704.04861>`_.
# We choose Mobilenet due to its small size.
# In this alpha state of the Bring Your Own Datatypes framework, we have not implemented any software optimizations for running software emulations of custom datatypes; the result is poor performance due to many calls into our datatype emulation library.
#
# First let us define two helper functions to get the mobilenet model and a cat image.
def get_mobilenet():
dshape = (1, 3, 224, 224)
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
return relay.frontend.from_mxnet(block, shape_dict)
def get_cat_image():
from tvm.contrib.download import download_testdata
from PIL import Image
url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
dst = "cat.png"
real_dst = download_testdata(url, dst, module="data")
img = Image.open(real_dst).resize((224, 224))
# CoreML's standard model image format is BGR
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
return np.asarray(img, dtype="float32")
module, params = get_mobilenet()
######################################################################
# It's easy to execute MobileNet with native TVM:
ex = tvm.relay.create_executor("graph", mod=module, params=params)
input = get_cat_image()
result = ex.evaluate()(input).numpy()
# print first 10 elements
print(result.flatten()[:10])
######################################################################
# Now, we would like to change the model to use myfloat internally. To do so, we need to convert the network. To do this, we first define a function which will help us convert tensors:
def convert_ndarray(dst_dtype, array):
"""Converts an NDArray into the specified datatype"""
x = relay.var("x", shape=array.shape, dtype=str(array.dtype))
cast = relay.Function([x], x.astype(dst_dtype))
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
return relay.create_executor("graph").evaluate(cast)(array)
######################################################################
# Now, to actually convert the entire network, we have written `a pass in Relay <https://github.com/gussmith23/tvm/blob/ea174c01c54a2529e19ca71e125f5884e728da6e/python/tvm/relay/frontend/change_datatype.py#L21>`_ which simply converts all nodes within the model to use the new datatype.
from tvm.relay.frontend.change_datatype import ChangeDatatype
src_dtype = "float32"
dst_dtype = "custom[myfloat]32"
module = relay.transform.InferType()(module)
# Currently, custom datatypes only work if you run simplify_inference beforehand
module = tvm.relay.transform.SimplifyInference()(module)
# Run type inference before changing datatype
module = tvm.relay.transform.InferType()(module)
# Change datatype from float to myfloat and re-infer types
cdtype = ChangeDatatype(src_dtype, dst_dtype)
expr = cdtype.visit(module["main"])
module = tvm.relay.transform.InferType()(module)
# We also convert the parameters:
params = {k: convert_ndarray(dst_dtype, v) for k, v in params.items()}
# We also need to convert our input:
input = convert_ndarray(dst_dtype, input)
# Finally, we can try to run the converted model:
try:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = tvm.relay.create_executor("graph", mod=module).evaluate(expr)(
input, **params
)
except tvm.TVMError as e:
print(str(e).split("\n")[-1])
######################################################################
# When we attempt to run the model, we get a familiar error telling us that more functions need to be registered for myfloat.
#
# Because this is a neural network, many more operations are required.
# Here, we register all the needed functions:
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "FloatToCustom32"}),
"FloatImm",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else"
)
tvm.target.datatype.register_op(
tvm.target.datatype.lower_call_pure_extern,
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.call_pure_extern",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Mul"}),
"Mul",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Div"}),
"Div",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Sqrt"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.sqrt",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Sub"}),
"Sub",
"llvm",
"myfloat",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Exp"}),
"Call",
"llvm",
"myfloat",
intrinsic_name="tir.exp",
)
tvm.target.datatype.register_op(
tvm.target.datatype.create_lower_func({32: "Custom32Max"}),
"Max",
"llvm",
"myfloat",
)
tvm.target.datatype.register_min_func(
tvm.target.datatype.create_min_lower_func({32: "MinCustom32"}, "myfloat"),
"myfloat",
)
######################################################################
# Note we are making use of two new functions: ``register_min_func`` and ``create_min_lower_func``.
#
# ``register_min_func`` takes in an integer ``num_bits`` for the bit length, and should return an operation
# representing the minimum finite representable value for the custom data type with the specified bit length.
#
# Similar to ``register_op`` and ``create_lower_func``, the ``create_min_lower_func`` handles the general case
# where the minimum representable custom datatype value is implemented using calls to an external library.
#
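# Since myfloat is just an IEEE 754 float under the hood, the minimum finite value
# that ``MinCustom32`` is expected to report is roughly -3.4028235e38 (i.e. -FLT_MAX);
# this is an assumption about the example library, not something TVM enforces.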
# Now we can finally run the model:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = relay.create_executor(mod=module).evaluate(expr)(input, **params)
result_myfloat = convert_ndarray(src_dtype, result_myfloat).numpy()
# print first 10 elements
print(result_myfloat.flatten()[:10])
# Again, note that the output using 32-bit myfloat is exactly the same as with 32-bit floats,
# because myfloat is exactly a float!
np.testing.assert_array_equal(result, result_myfloat)
|
py | 1a4878b504dad12eeb84c229b2c4507741c9e786 | from sympy import (Add, Matrix, Mul, S, symbols, Eq, pi, factorint, oo,
powsimp, Rational)
from sympy.core.function import _mexpand
from sympy.core.compatibility import ordered
from sympy.functions.elementary.trigonometric import sin
from sympy.solvers.diophantine import diophantine
from sympy.solvers.diophantine.diophantine import (diop_DN,
diop_solve, diop_ternary_quadratic_normal,
diop_general_pythagorean, diop_ternary_quadratic, diop_linear,
diop_quadratic, diop_general_sum_of_squares, diop_general_sum_of_even_powers,
descent, diop_bf_DN, divisible, equivalent, find_DN, ldescent, length,
reconstruct, partition, power_representation,
prime_as_sum_of_two_squares, square_factor, sum_of_four_squares,
sum_of_three_squares, transformation_to_DN, transformation_to_normal,
classify_diop, base_solution_linear, cornacchia, sqf_normal, gaussian_reduce, holzer,
check_param, parametrize_ternary_quadratic, sum_of_powers, sum_of_squares,
_diop_ternary_quadratic_normal, _diop_general_sum_of_squares, _nint_or_floor,
_odd, _even, _remove_gcd, _can_do_sum_of_squares)
from sympy.utilities import default_sort_key
from sympy.testing.pytest import slow, raises, XFAIL
from sympy.utilities.iterables import (
signed_permutations)
a, b, c, d, p, q, x, y, z, w, t, u, v, X, Y, Z = symbols(
"a, b, c, d, p, q, x, y, z, w, t, u, v, X, Y, Z", integer=True)
t_0, t_1, t_2, t_3, t_4, t_5, t_6 = symbols("t_:7", integer=True)
m1, m2, m3 = symbols('m1:4', integer=True)
n1 = symbols('n1', integer=True)
def diop_simplify(eq):
return _mexpand(powsimp(_mexpand(eq)))
def test_input_format():
raises(TypeError, lambda: diophantine(sin(x)))
raises(TypeError, lambda: diophantine(x/pi - 3))
def test_nosols():
# diophantine should sympify eq so that these are equivalent
assert diophantine(3) == set()
assert diophantine(S(3)) == set()
def test_univariate():
assert diop_solve((x - 1)*(x - 2)**2) == set([(1,), (2,)])
assert diop_solve((x - 1)*(x - 2)) == set([(1,), (2,)])
def test_classify_diop():
raises(TypeError, lambda: classify_diop(x**2/3 - 1))
raises(ValueError, lambda: classify_diop(1))
raises(NotImplementedError, lambda: classify_diop(w*x*y*z - 1))
raises(NotImplementedError, lambda: classify_diop(x**3 + y**3 + z**4 - 90))
assert classify_diop(14*x**2 + 15*x - 42) == (
[x], {1: -42, x: 15, x**2: 14}, 'univariate')
assert classify_diop(x*y + z) == (
[x, y, z], {x*y: 1, z: 1}, 'inhomogeneous_ternary_quadratic')
assert classify_diop(x*y + z + w + x**2) == (
[w, x, y, z], {x*y: 1, w: 1, x**2: 1, z: 1}, 'inhomogeneous_general_quadratic')
assert classify_diop(x*y + x*z + x**2 + 1) == (
[x, y, z], {x*y: 1, x*z: 1, x**2: 1, 1: 1}, 'inhomogeneous_general_quadratic')
assert classify_diop(x*y + z + w + 42) == (
[w, x, y, z], {x*y: 1, w: 1, 1: 42, z: 1}, 'inhomogeneous_general_quadratic')
assert classify_diop(x*y + z*w) == (
[w, x, y, z], {x*y: 1, w*z: 1}, 'homogeneous_general_quadratic')
assert classify_diop(x*y**2 + 1) == (
[x, y], {x*y**2: 1, 1: 1}, 'cubic_thue')
assert classify_diop(x**4 + y**4 + z**4 - (1 + 16 + 81)) == (
[x, y, z], {1: -98, x**4: 1, z**4: 1, y**4: 1}, 'general_sum_of_even_powers')
def test_linear():
assert diop_solve(x) == (0,)
assert diop_solve(1*x) == (0,)
assert diop_solve(3*x) == (0,)
assert diop_solve(x + 1) == (-1,)
assert diop_solve(2*x + 1) == (None,)
assert diop_solve(2*x + 4) == (-2,)
assert diop_solve(y + x) == (t_0, -t_0)
assert diop_solve(y + x + 0) == (t_0, -t_0)
assert diop_solve(y + x - 0) == (t_0, -t_0)
assert diop_solve(0*x - y - 5) == (-5,)
assert diop_solve(3*y + 2*x - 5) == (3*t_0 - 5, -2*t_0 + 5)
assert diop_solve(2*x - 3*y - 5) == (3*t_0 - 5, 2*t_0 - 5)
assert diop_solve(-2*x - 3*y - 5) == (3*t_0 + 5, -2*t_0 - 5)
assert diop_solve(7*x + 5*y) == (5*t_0, -7*t_0)
assert diop_solve(2*x + 4*y) == (2*t_0, -t_0)
assert diop_solve(4*x + 6*y - 4) == (3*t_0 - 2, -2*t_0 + 2)
assert diop_solve(4*x + 6*y - 3) == (None, None)
assert diop_solve(0*x + 3*y - 4*z + 5) == (4*t_0 + 5, 3*t_0 + 5)
assert diop_solve(4*x + 3*y - 4*z + 5) == (t_0, 8*t_0 + 4*t_1 + 5, 7*t_0 + 3*t_1 + 5)
assert diop_solve(4*x + 3*y - 4*z + 5, None) == (0, 5, 5)
assert diop_solve(4*x + 2*y + 8*z - 5) == (None, None, None)
assert diop_solve(5*x + 7*y - 2*z - 6) == (t_0, -3*t_0 + 2*t_1 + 6, -8*t_0 + 7*t_1 + 18)
assert diop_solve(3*x - 6*y + 12*z - 9) == (2*t_0 + 3, t_0 + 2*t_1, t_1)
assert diop_solve(6*w + 9*x + 20*y - z) == (t_0, t_1, t_1 + t_2, 6*t_0 + 29*t_1 + 20*t_2)
# to ignore constant factors, use diophantine
raises(TypeError, lambda: diop_solve(x/2))
def test_quadratic_simple_hyperbolic_case():
# Simple Hyperbolic case: A = C = 0 and B != 0
assert diop_solve(3*x*y + 34*x - 12*y + 1) == \
set([(-133, -11), (5, -57)])
assert diop_solve(6*x*y + 2*x + 3*y + 1) == set([])
assert diop_solve(-13*x*y + 2*x - 4*y - 54) == set([(27, 0)])
assert diop_solve(-27*x*y - 30*x - 12*y - 54) == set([(-14, -1)])
assert diop_solve(2*x*y + 5*x + 56*y + 7) == set([(-161, -3),\
(-47,-6), (-35, -12), (-29, -69),\
(-27, 64), (-21, 7),(-9, 1),\
(105, -2)])
assert diop_solve(6*x*y + 9*x + 2*y + 3) == set([])
assert diop_solve(x*y + x + y + 1) == set([(-1, t), (t, -1)])
assert diophantine(48*x*y)
def test_quadratic_elliptical_case():
# Elliptical case: B**2 - 4AC < 0
    # The two commented-out test cases below require a lot of memory due to the quadratic_congruence() method.
    # That method should be replaced by Pernici's square_mod() method when his PR gets merged.
#assert diop_solve(42*x**2 + 8*x*y + 15*y**2 + 23*x + 17*y - 4915) == set([(-11, -1)])
assert diop_solve(4*x**2 + 3*y**2 + 5*x - 11*y + 12) == set([])
assert diop_solve(x**2 + y**2 + 2*x + 2*y + 2) == set([(-1, -1)])
#assert diop_solve(15*x**2 - 9*x*y + 14*y**2 - 23*x - 14*y - 4950) == set([(-15, 6)])
assert diop_solve(10*x**2 + 12*x*y + 12*y**2 - 34) == \
set([(-1, -1), (-1, 2), (1, -2), (1, 1)])
def test_quadratic_parabolic_case():
# Parabolic case: B**2 - 4AC = 0
assert check_solutions(8*x**2 - 24*x*y + 18*y**2 + 5*x + 7*y + 16)
assert check_solutions(8*x**2 - 24*x*y + 18*y**2 + 6*x + 12*y - 6)
assert check_solutions(8*x**2 + 24*x*y + 18*y**2 + 4*x + 6*y - 7)
assert check_solutions(-4*x**2 + 4*x*y - y**2 + 2*x - 3)
assert check_solutions(x**2 + 2*x*y + y**2 + 2*x + 2*y + 1)
assert check_solutions(x**2 - 2*x*y + y**2 + 2*x + 2*y + 1)
assert check_solutions(y**2 - 41*x + 40)
def test_quadratic_perfect_square():
# B**2 - 4*A*C > 0
# B**2 - 4*A*C is a perfect square
assert check_solutions(48*x*y)
assert check_solutions(4*x**2 - 5*x*y + y**2 + 2)
assert check_solutions(-2*x**2 - 3*x*y + 2*y**2 -2*x - 17*y + 25)
assert check_solutions(12*x**2 + 13*x*y + 3*y**2 - 2*x + 3*y - 12)
assert check_solutions(8*x**2 + 10*x*y + 2*y**2 - 32*x - 13*y - 23)
assert check_solutions(4*x**2 - 4*x*y - 3*y- 8*x - 3)
assert check_solutions(- 4*x*y - 4*y**2 - 3*y- 5*x - 10)
assert check_solutions(x**2 - y**2 - 2*x - 2*y)
assert check_solutions(x**2 - 9*y**2 - 2*x - 6*y)
assert check_solutions(4*x**2 - 9*y**2 - 4*x - 12*y - 3)
def test_quadratic_non_perfect_square():
# B**2 - 4*A*C is not a perfect square
# Used check_solutions() since the solutions are complex expressions involving
# square roots and exponents
assert check_solutions(x**2 - 2*x - 5*y**2)
assert check_solutions(3*x**2 - 2*y**2 - 2*x - 2*y)
assert check_solutions(x**2 - x*y - y**2 - 3*y)
assert check_solutions(x**2 - 9*y**2 - 2*x - 6*y)
def test_issue_9106():
eq = -48 - 2*x*(3*x - 1) + y*(3*y - 1)
v = (x, y)
for sol in diophantine(eq):
assert not diop_simplify(eq.xreplace(dict(zip(v, sol))))
def test_issue_18138():
eq = x**2 - x - y**2
v = (x, y)
for sol in diophantine(eq):
assert not diop_simplify(eq.xreplace(dict(zip(v, sol))))
@slow
def test_quadratic_non_perfect_slow():
assert check_solutions(8*x**2 + 10*x*y - 2*y**2 - 32*x - 13*y - 23)
# This leads to very large numbers.
# assert check_solutions(5*x**2 - 13*x*y + y**2 - 4*x - 4*y - 15)
assert check_solutions(-3*x**2 - 2*x*y + 7*y**2 - 5*x - 7)
assert check_solutions(-4 - x + 4*x**2 - y - 3*x*y - 4*y**2)
assert check_solutions(1 + 2*x + 2*x**2 + 2*y + x*y - 2*y**2)
def test_DN():
# Most of the test cases were adapted from,
# Solving the generalized Pell equation x**2 - D*y**2 = N, John P. Robertson, July 31, 2004.
# http://www.jpr2718.org/pell.pdf
# others are verified using Wolfram Alpha.
# Covers cases where D <= 0 or D > 0 and D is a square or N = 0
# Solutions are straightforward in these cases.
assert diop_DN(3, 0) == [(0, 0)]
assert diop_DN(-17, -5) == []
assert diop_DN(-19, 23) == [(2, 1)]
assert diop_DN(-13, 17) == [(2, 1)]
assert diop_DN(-15, 13) == []
assert diop_DN(0, 5) == []
assert diop_DN(0, 9) == [(3, t)]
assert diop_DN(9, 0) == [(3*t, t)]
assert diop_DN(16, 24) == []
assert diop_DN(9, 180) == [(18, 4)]
assert diop_DN(9, -180) == [(12, 6)]
assert diop_DN(7, 0) == [(0, 0)]
# When equation is x**2 + y**2 = N
# Solutions are interchangeable
assert diop_DN(-1, 5) == [(2, 1), (1, 2)]
assert diop_DN(-1, 169) == [(12, 5), (5, 12), (13, 0), (0, 13)]
# D > 0 and D is not a square
# N = 1
assert diop_DN(13, 1) == [(649, 180)]
assert diop_DN(980, 1) == [(51841, 1656)]
assert diop_DN(981, 1) == [(158070671986249, 5046808151700)]
assert diop_DN(986, 1) == [(49299, 1570)]
assert diop_DN(991, 1) == [(379516400906811930638014896080, 12055735790331359447442538767)]
assert diop_DN(17, 1) == [(33, 8)]
assert diop_DN(19, 1) == [(170, 39)]
# N = -1
assert diop_DN(13, -1) == [(18, 5)]
assert diop_DN(991, -1) == []
assert diop_DN(41, -1) == [(32, 5)]
assert diop_DN(290, -1) == [(17, 1)]
assert diop_DN(21257, -1) == [(13913102721304, 95427381109)]
assert diop_DN(32, -1) == []
# |N| > 1
# Some tests were created using calculator at
# http://www.numbertheory.org/php/patz.html
assert diop_DN(13, -4) == [(3, 1), (393, 109), (36, 10)]
    # The source I referred to returned (3, 1), (393, 109) and (-3, 1) as fundamental solutions,
    # so (-3, 1) and (393, 109) should be in the same equivalence class
assert equivalent(-3, 1, 393, 109, 13, -4) == True
assert diop_DN(13, 27) == [(220, 61), (40, 11), (768, 213), (12, 3)]
assert set(diop_DN(157, 12)) == \
set([(13, 1), (10663, 851), (579160, 46222), \
(483790960,38610722), (26277068347, 2097138361), (21950079635497, 1751807067011)])
assert diop_DN(13, 25) == [(3245, 900)]
assert diop_DN(192, 18) == []
assert diop_DN(23, 13) == [(-6, 1), (6, 1)]
assert diop_DN(167, 2) == [(13, 1)]
assert diop_DN(167, -2) == []
assert diop_DN(123, -2) == [(11, 1)]
# One calculator returned [(11, 1), (-11, 1)] but both of these are in
# the same equivalence class
assert equivalent(11, 1, -11, 1, 123, -2)
assert diop_DN(123, -23) == [(-10, 1), (10, 1)]
assert diop_DN(0, 0, t) == [(0, t)]
assert diop_DN(0, -1, t) == []
def test_bf_pell():
assert diop_bf_DN(13, -4) == [(3, 1), (-3, 1), (36, 10)]
assert diop_bf_DN(13, 27) == [(12, 3), (-12, 3), (40, 11), (-40, 11)]
assert diop_bf_DN(167, -2) == []
assert diop_bf_DN(1729, 1) == [(44611924489705, 1072885712316)]
assert diop_bf_DN(89, -8) == [(9, 1), (-9, 1)]
assert diop_bf_DN(21257, -1) == [(13913102721304, 95427381109)]
assert diop_bf_DN(340, -4) == [(756, 41)]
assert diop_bf_DN(-1, 0, t) == [(0, 0)]
assert diop_bf_DN(0, 0, t) == [(0, t)]
assert diop_bf_DN(4, 0, t) == [(2*t, t), (-2*t, t)]
assert diop_bf_DN(3, 0, t) == [(0, 0)]
assert diop_bf_DN(1, -2, t) == []
def test_length():
assert length(2, 1, 0) == 1
assert length(-2, 4, 5) == 3
assert length(-5, 4, 17) == 4
assert length(0, 4, 13) == 6
assert length(7, 13, 11) == 23
assert length(1, 6, 4) == 2
def is_pell_transformation_ok(eq):
"""
    Test whether X*Y, X, or Y terms are present in the equation
    after transforming it using the transformation returned by
    transformation_to_DN(). If they are not present, we are good.
    Moreover, the coefficient of X**2 should be a divisor of the
    coefficient of Y**2 and of the constant term.
"""
A, B = transformation_to_DN(eq)
u = (A*Matrix([X, Y]) + B)[0]
v = (A*Matrix([X, Y]) + B)[1]
simplified = diop_simplify(eq.subs(zip((x, y), (u, v))))
coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])
for term in [X*Y, X, Y]:
if term in coeff.keys():
return False
for term in [X**2, Y**2, 1]:
if term not in coeff.keys():
coeff[term] = 0
if coeff[X**2] != 0:
return divisible(coeff[Y**2], coeff[X**2]) and \
divisible(coeff[1], coeff[X**2])
return True
def test_transformation_to_pell():
assert is_pell_transformation_ok(-13*x**2 - 7*x*y + y**2 + 2*x - 2*y - 14)
assert is_pell_transformation_ok(-17*x**2 + 19*x*y - 7*y**2 - 5*x - 13*y - 23)
assert is_pell_transformation_ok(x**2 - y**2 + 17)
assert is_pell_transformation_ok(-x**2 + 7*y**2 - 23)
assert is_pell_transformation_ok(25*x**2 - 45*x*y + 5*y**2 - 5*x - 10*y + 5)
assert is_pell_transformation_ok(190*x**2 + 30*x*y + y**2 - 3*y - 170*x - 130)
assert is_pell_transformation_ok(x**2 - 2*x*y -190*y**2 - 7*y - 23*x - 89)
assert is_pell_transformation_ok(15*x**2 - 9*x*y + 14*y**2 - 23*x - 14*y - 4950)
def test_find_DN():
assert find_DN(x**2 - 2*x - y**2) == (1, 1)
assert find_DN(x**2 - 3*y**2 - 5) == (3, 5)
assert find_DN(x**2 - 2*x*y - 4*y**2 - 7) == (5, 7)
assert find_DN(4*x**2 - 8*x*y - y**2 - 9) == (20, 36)
assert find_DN(7*x**2 - 2*x*y - y**2 - 12) == (8, 84)
assert find_DN(-3*x**2 + 4*x*y -y**2) == (1, 0)
assert find_DN(-13*x**2 - 7*x*y + y**2 + 2*x - 2*y -14) == (101, -7825480)
def test_ldescent():
# Equations which have solutions
u = ([(13, 23), (3, -11), (41, -113), (4, -7), (-7, 4), (91, -3), (1, 1), (1, -1),
(4, 32), (17, 13), (123689, 1), (19, -570)])
for a, b in u:
w, x, y = ldescent(a, b)
assert a*x**2 + b*y**2 == w**2
assert ldescent(-1, -1) is None
def test_diop_ternary_quadratic_normal():
assert check_solutions(234*x**2 - 65601*y**2 - z**2)
assert check_solutions(23*x**2 + 616*y**2 - z**2)
assert check_solutions(5*x**2 + 4*y**2 - z**2)
assert check_solutions(3*x**2 + 6*y**2 - 3*z**2)
assert check_solutions(x**2 + 3*y**2 - z**2)
assert check_solutions(4*x**2 + 5*y**2 - z**2)
assert check_solutions(x**2 + y**2 - z**2)
assert check_solutions(16*x**2 + y**2 - 25*z**2)
assert check_solutions(6*x**2 - y**2 + 10*z**2)
assert check_solutions(213*x**2 + 12*y**2 - 9*z**2)
assert check_solutions(34*x**2 - 3*y**2 - 301*z**2)
assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)
def is_normal_transformation_ok(eq):
A = transformation_to_normal(eq)
X, Y, Z = A*Matrix([x, y, z])
simplified = diop_simplify(eq.subs(zip((x, y, z), (X, Y, Z))))
coeff = dict([reversed(t.as_independent(*[X, Y, Z])) for t in simplified.args])
for term in [X*Y, Y*Z, X*Z]:
if term in coeff.keys():
return False
return True
def test_transformation_to_normal():
assert is_normal_transformation_ok(x**2 + 3*y**2 + z**2 - 13*x*y - 16*y*z + 12*x*z)
assert is_normal_transformation_ok(x**2 + 3*y**2 - 100*z**2)
assert is_normal_transformation_ok(x**2 + 23*y*z)
assert is_normal_transformation_ok(3*y**2 - 100*z**2 - 12*x*y)
assert is_normal_transformation_ok(x**2 + 23*x*y - 34*y*z + 12*x*z)
assert is_normal_transformation_ok(z**2 + 34*x*y - 23*y*z + x*z)
assert is_normal_transformation_ok(x**2 + y**2 + z**2 - x*y - y*z - x*z)
assert is_normal_transformation_ok(x**2 + 2*y*z + 3*z**2)
assert is_normal_transformation_ok(x*y + 2*x*z + 3*y*z)
assert is_normal_transformation_ok(2*x*z + 3*y*z)
def test_diop_ternary_quadratic():
assert check_solutions(2*x**2 + z**2 + y**2 - 4*x*y)
assert check_solutions(x**2 - y**2 - z**2 - x*y - y*z)
assert check_solutions(3*x**2 - x*y - y*z - x*z)
assert check_solutions(x**2 - y*z - x*z)
assert check_solutions(5*x**2 - 3*x*y - x*z)
assert check_solutions(4*x**2 - 5*y**2 - x*z)
assert check_solutions(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
assert check_solutions(8*x**2 - 12*y*z)
assert check_solutions(45*x**2 - 7*y**2 - 8*x*y - z**2)
assert check_solutions(x**2 - 49*y**2 - z**2 + 13*z*y -8*x*y)
assert check_solutions(90*x**2 + 3*y**2 + 5*x*y + 2*z*y + 5*x*z)
assert check_solutions(x**2 + 3*y**2 + z**2 - x*y - 17*y*z)
assert check_solutions(x**2 + 3*y**2 + z**2 - x*y - 16*y*z + 12*x*z)
assert check_solutions(x**2 + 3*y**2 + z**2 - 13*x*y - 16*y*z + 12*x*z)
assert check_solutions(x*y - 7*y*z + 13*x*z)
assert diop_ternary_quadratic_normal(x**2 + y**2 + z**2) == (None, None, None)
assert diop_ternary_quadratic_normal(x**2 + y**2) is None
raises(ValueError, lambda:
_diop_ternary_quadratic_normal((x, y, z),
{x*y: 1, x**2: 2, y**2: 3, z**2: 0}))
eq = -2*x*y - 6*x*z + 7*y**2 - 3*y*z + 4*z**2
assert diop_ternary_quadratic(eq) == (7, 2, 0)
assert diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2) == \
(1, 0, 2)
assert diop_ternary_quadratic(x*y + 2*y*z) == \
(-2, 0, n1)
eq = -5*x*y - 8*x*z - 3*y*z + 8*z**2
assert parametrize_ternary_quadratic(eq) == \
(8*p**2 - 3*p*q, -8*p*q + 8*q**2, 5*p*q)
# this cannot be tested with diophantine because it will
# factor into a product
assert diop_solve(x*y + 2*y*z) == (-2*p*q, -n1*p**2 + p**2, p*q)
def test_square_factor():
assert square_factor(1) == square_factor(-1) == 1
assert square_factor(0) == 1
assert square_factor(5) == square_factor(-5) == 1
assert square_factor(4) == square_factor(-4) == 2
assert square_factor(12) == square_factor(-12) == 2
assert square_factor(6) == 1
assert square_factor(18) == 3
assert square_factor(52) == 2
assert square_factor(49) == 7
assert square_factor(392) == 14
assert square_factor(factorint(-12)) == 2
def test_parametrize_ternary_quadratic():
assert check_solutions(x**2 + y**2 - z**2)
assert check_solutions(x**2 + 2*x*y + z**2)
assert check_solutions(234*x**2 - 65601*y**2 - z**2)
assert check_solutions(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
assert check_solutions(x**2 - y**2 - z**2)
assert check_solutions(x**2 - 49*y**2 - z**2 + 13*z*y - 8*x*y)
assert check_solutions(8*x*y + z**2)
assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)
assert check_solutions(236*x**2 - 225*y**2 - 11*x*y - 13*y*z - 17*x*z)
assert check_solutions(90*x**2 + 3*y**2 + 5*x*y + 2*z*y + 5*x*z)
assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)
def test_no_square_ternary_quadratic():
assert check_solutions(2*x*y + y*z - 3*x*z)
assert check_solutions(189*x*y - 345*y*z - 12*x*z)
assert check_solutions(23*x*y + 34*y*z)
assert check_solutions(x*y + y*z + z*x)
assert check_solutions(23*x*y + 23*y*z + 23*x*z)
def test_descent():
u = ([(13, 23), (3, -11), (41, -113), (91, -3), (1, 1), (1, -1), (17, 13), (123689, 1), (19, -570)])
for a, b in u:
w, x, y = descent(a, b)
assert a*x**2 + b*y**2 == w**2
# the docstring warns against bad input, so these are expected results
# - can't both be negative
raises(TypeError, lambda: descent(-1, -3))
# A can't be zero unless B != 1
raises(ZeroDivisionError, lambda: descent(0, 3))
# supposed to be square-free
raises(TypeError, lambda: descent(4, 3))
def test_diophantine():
assert check_solutions((x - y)*(y - z)*(z - x))
assert check_solutions((x - y)*(x**2 + y**2 - z**2))
assert check_solutions((x - 3*y + 7*z)*(x**2 + y**2 - z**2))
assert check_solutions((x**2 - 3*y**2 - 1))
assert check_solutions(y**2 + 7*x*y)
assert check_solutions(x**2 - 3*x*y + y**2)
assert check_solutions(z*(x**2 - y**2 - 15))
assert check_solutions(x*(2*y - 2*z + 5))
assert check_solutions((x**2 - 3*y**2 - 1)*(x**2 - y**2 - 15))
assert check_solutions((x**2 - 3*y**2 - 1)*(y - 7*z))
assert check_solutions((x**2 + y**2 - z**2)*(x - 7*y - 3*z + 4*w))
# Following test case caused problems in parametric representation
# But this can be solved by factoring out y.
# No need to use methods for ternary quadratic equations.
assert check_solutions(y**2 - 7*x*y + 4*y*z)
assert check_solutions(x**2 - 2*x + 1)
assert diophantine(x - y) == diophantine(Eq(x, y))
    # issue 18196
eq = x**4 + y**4 - 97
assert diophantine(eq, permute=True) == diophantine(-eq, permute=True)
assert diophantine(3*x*pi - 2*y*pi) == set([(2*t_0, 3*t_0)])
eq = x**2 + y**2 + z**2 - 14
base_sol = set([(1, 2, 3)])
assert diophantine(eq) == base_sol
complete_soln = set(signed_permutations(base_sol.pop()))
assert diophantine(eq, permute=True) == complete_soln
assert diophantine(x**2 + x*Rational(15, 14) - 3) == set()
# test issue 11049
eq = 92*x**2 - 99*y**2 - z**2
coeff = eq.as_coefficients_dict()
assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
(9, 7, 51)
assert diophantine(eq) == set([(
891*p**2 + 9*q**2, -693*p**2 - 102*p*q + 7*q**2,
5049*p**2 - 1386*p*q - 51*q**2)])
eq = 2*x**2 + 2*y**2 - z**2
coeff = eq.as_coefficients_dict()
assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
(1, 1, 2)
assert diophantine(eq) == set([(
2*p**2 - q**2, -2*p**2 + 4*p*q - q**2,
4*p**2 - 4*p*q + 2*q**2)])
eq = 411*x**2+57*y**2-221*z**2
coeff = eq.as_coefficients_dict()
assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
(2021, 2645, 3066)
assert diophantine(eq) == \
set([(115197*p**2 - 446641*q**2, -150765*p**2 + 1355172*p*q -
584545*q**2, 174762*p**2 - 301530*p*q + 677586*q**2)])
eq = 573*x**2+267*y**2-984*z**2
coeff = eq.as_coefficients_dict()
assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
(49, 233, 127)
assert diophantine(eq) == \
set([(4361*p**2 - 16072*q**2, -20737*p**2 + 83312*p*q - 76424*q**2,
11303*p**2 - 41474*p*q + 41656*q**2)])
# this produces factors during reconstruction
eq = x**2 + 3*y**2 - 12*z**2
coeff = eq.as_coefficients_dict()
assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
(0, 2, 1)
assert diophantine(eq) == \
set([(24*p*q, 2*p**2 - 24*q**2, p**2 + 12*q**2)])
# solvers have not been written for every type
raises(NotImplementedError, lambda: diophantine(x*y**2 + 1))
# rational expressions
assert diophantine(1/x) == set()
    assert diophantine(1/x + 1/y - S.Half) == \
        set([(6, 3), (-2, 1), (4, 4), (1, -2), (3, 6)])
assert diophantine(x**2 + y**2 +3*x- 5, permute=True) == \
set([(-1, 1), (-4, -1), (1, -1), (1, 1), (-4, 1), (-1, -1), (4, 1), (4, -1)])
#test issue 18186
assert diophantine(y**4 + x**4 - 2**4 - 3**4, syms=(x, y), permute=True) == \
set([(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)])
assert diophantine(y**4 + x**4 - 2**4 - 3**4, syms=(y, x), permute=True) == \
set([(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)])
# issue 18122
assert check_solutions(x**2-y)
assert check_solutions(y**2-x)
assert diophantine((x**2-y), t) == set([(t, t**2)])
assert diophantine((y**2-x), t) == set([(t**2, -t)])
def test_general_pythagorean():
from sympy.abc import a, b, c, d, e
assert check_solutions(a**2 + b**2 + c**2 - d**2)
assert check_solutions(a**2 + 4*b**2 + 4*c**2 - d**2)
assert check_solutions(9*a**2 + 4*b**2 + 4*c**2 - d**2)
assert check_solutions(9*a**2 + 4*b**2 - 25*d**2 + 4*c**2 )
assert check_solutions(9*a**2 - 16*d**2 + 4*b**2 + 4*c**2)
assert check_solutions(-e**2 + 9*a**2 + 4*b**2 + 4*c**2 + 25*d**2)
assert check_solutions(16*a**2 - b**2 + 9*c**2 + d**2 + 25*e**2)
def test_diop_general_sum_of_squares_quick():
for i in range(3, 10):
assert check_solutions(sum(i**2 for i in symbols(':%i' % i)) - i)
raises(ValueError, lambda: _diop_general_sum_of_squares((x, y), 2))
assert _diop_general_sum_of_squares((x, y, z), -2) == set()
eq = x**2 + y**2 + z**2 - (1 + 4 + 9)
assert diop_general_sum_of_squares(eq) == \
set([(1, 2, 3)])
eq = u**2 + v**2 + x**2 + y**2 + z**2 - 1313
assert len(diop_general_sum_of_squares(eq, 3)) == 3
# issue 11016
var = symbols(':5') + (symbols('6', negative=True),)
eq = Add(*[i**2 for i in var]) - 112
base_soln = set(
[(0, 1, 1, 5, 6, -7), (1, 1, 1, 3, 6, -8), (2, 3, 3, 4, 5, -7),
(0, 1, 1, 1, 3, -10), (0, 0, 4, 4, 4, -8), (1, 2, 3, 3, 5, -8),
(0, 1, 2, 3, 7, -7), (2, 2, 4, 4, 6, -6), (1, 1, 3, 4, 6, -7),
(0, 2, 3, 3, 3, -9), (0, 0, 2, 2, 2, -10), (1, 1, 2, 3, 4, -9),
(0, 1, 1, 2, 5, -9), (0, 0, 2, 6, 6, -6), (1, 3, 4, 5, 5, -6),
(0, 2, 2, 2, 6, -8), (0, 3, 3, 3, 6, -7), (0, 2, 3, 5, 5, -7),
(0, 1, 5, 5, 5, -6)])
assert diophantine(eq) == base_soln
assert len(diophantine(eq, permute=True)) == 196800
# handle negated squares with signsimp
assert diophantine(12 - x**2 - y**2 - z**2) == set([(2, 2, 2)])
# diophantine handles simplification, so classify_diop should
# not have to look for additional patterns that are removed
# by diophantine
eq = a**2 + b**2 + c**2 + d**2 - 4
raises(NotImplementedError, lambda: classify_diop(-eq))
def test_diop_partition():
for n in [8, 10]:
for k in range(1, 8):
for p in partition(n, k):
assert len(p) == k
assert [p for p in partition(3, 5)] == []
assert [list(p) for p in partition(3, 5, 1)] == [
[0, 0, 0, 0, 3], [0, 0, 0, 1, 2], [0, 0, 1, 1, 1]]
assert list(partition(0)) == [()]
assert list(partition(1, 0)) == [()]
assert [list(i) for i in partition(3)] == [[1, 1, 1], [1, 2], [3]]
def test_prime_as_sum_of_two_squares():
for i in [5, 13, 17, 29, 37, 41, 2341, 3557, 34841, 64601]:
a, b = prime_as_sum_of_two_squares(i)
assert a**2 + b**2 == i
assert prime_as_sum_of_two_squares(7) is None
ans = prime_as_sum_of_two_squares(800029)
assert ans == (450, 773) and type(ans[0]) is int
def test_sum_of_three_squares():
for i in [0, 1, 2, 34, 123, 34304595905, 34304595905394941, 343045959052344,
800, 801, 802, 803, 804, 805, 806]:
a, b, c = sum_of_three_squares(i)
assert a**2 + b**2 + c**2 == i
assert sum_of_three_squares(7) is None
assert sum_of_three_squares((4**5)*15) is None
assert sum_of_three_squares(25) == (5, 0, 0)
assert sum_of_three_squares(4) == (0, 0, 2)
def test_sum_of_four_squares():
from random import randint
# this should never fail
n = randint(1, 100000000000000)
assert sum(i**2 for i in sum_of_four_squares(n)) == n
assert sum_of_four_squares(0) == (0, 0, 0, 0)
assert sum_of_four_squares(14) == (0, 1, 2, 3)
assert sum_of_four_squares(15) == (1, 1, 2, 3)
assert sum_of_four_squares(18) == (1, 2, 2, 3)
assert sum_of_four_squares(19) == (0, 1, 3, 3)
assert sum_of_four_squares(48) == (0, 4, 4, 4)
def test_power_representation():
tests = [(1729, 3, 2), (234, 2, 4), (2, 1, 2), (3, 1, 3), (5, 2, 2), (12352, 2, 4),
(32760, 2, 3)]
for test in tests:
n, p, k = test
f = power_representation(n, p, k)
while True:
try:
l = next(f)
assert len(l) == k
chk_sum = 0
for l_i in l:
chk_sum = chk_sum + l_i**p
assert chk_sum == n
except StopIteration:
break
assert list(power_representation(20, 2, 4, True)) == \
[(1, 1, 3, 3), (0, 0, 2, 4)]
raises(ValueError, lambda: list(power_representation(1.2, 2, 2)))
raises(ValueError, lambda: list(power_representation(2, 0, 2)))
raises(ValueError, lambda: list(power_representation(2, 2, 0)))
assert list(power_representation(-1, 2, 2)) == []
assert list(power_representation(1, 1, 1)) == [(1,)]
assert list(power_representation(3, 2, 1)) == []
assert list(power_representation(4, 2, 1)) == [(2,)]
assert list(power_representation(3**4, 4, 6, zeros=True)) == \
[(1, 2, 2, 2, 2, 2), (0, 0, 0, 0, 0, 3)]
assert list(power_representation(3**4, 4, 5, zeros=False)) == []
assert list(power_representation(-2, 3, 2)) == [(-1, -1)]
assert list(power_representation(-2, 4, 2)) == []
assert list(power_representation(0, 3, 2, True)) == [(0, 0)]
assert list(power_representation(0, 3, 2, False)) == []
# when we are dealing with squares, do feasibility checks
assert len(list(power_representation(4**10*(8*10 + 7), 2, 3))) == 0
# there will be a recursion error if these aren't recognized
big = 2**30
for i in [13, 10, 7, 5, 4, 2, 1]:
assert list(sum_of_powers(big, 2, big - i)) == []
def test_assumptions():
"""
Test whether diophantine respects the assumptions.
"""
    # Test case taken from the Stack Overflow question below regarding assumptions in the diophantine module
#https://stackoverflow.com/questions/23301941/how-can-i-declare-natural-symbols-with-sympy
m, n = symbols('m n', integer=True, positive=True)
diof = diophantine(n ** 2 + m * n - 500)
assert diof == set([(5, 20), (40, 10), (95, 5), (121, 4), (248, 2), (499, 1)])
a, b = symbols('a b', integer=True, positive=False)
diof = diophantine(a*b + 2*a + 3*b - 6)
assert diof == set([(-15, -3), (-9, -4), (-7, -5), (-6, -6), (-5, -8), (-4, -14)])
def check_solutions(eq):
"""
Determines whether solutions returned by diophantine() satisfy the original
    equation. Hope to generalize this so we can remove functions like check_ternary_quadratic,
check_solutions_normal, check_solutions()
"""
s = diophantine(eq)
factors = Mul.make_args(eq)
var = list(eq.free_symbols)
var.sort(key=default_sort_key)
while s:
solution = s.pop()
for f in factors:
if diop_simplify(f.subs(zip(var, solution))) == 0:
break
else:
return False
return True
def test_diopcoverage():
eq = (2*x + y + 1)**2
assert diop_solve(eq) == set([(t_0, -2*t_0 - 1)])
eq = 2*x**2 + 6*x*y + 12*x + 4*y**2 + 18*y + 18
assert diop_solve(eq) == set([(t_0, -t_0 - 3), (2*t_0 - 3, -t_0)])
assert diop_quadratic(x + y**2 - 3) == set([(-t**2 + 3, -t)])
assert diop_linear(x + y - 3) == (t_0, 3 - t_0)
assert base_solution_linear(0, 1, 2, t=None) == (0, 0)
ans = (3*t - 1, -2*t + 1)
assert base_solution_linear(4, 8, 12, t) == ans
assert base_solution_linear(4, 8, 12, t=None) == tuple(_.subs(t, 0) for _ in ans)
assert cornacchia(1, 1, 20) is None
assert cornacchia(1, 1, 5) == set([(2, 1)])
assert cornacchia(1, 2, 17) == set([(3, 2)])
raises(ValueError, lambda: reconstruct(4, 20, 1))
assert gaussian_reduce(4, 1, 3) == (1, 1)
eq = -w**2 - x**2 - y**2 + z**2
assert diop_general_pythagorean(eq) == \
diop_general_pythagorean(-eq) == \
(m1**2 + m2**2 - m3**2, 2*m1*m3,
2*m2*m3, m1**2 + m2**2 + m3**2)
assert check_param(S(3) + x/3, S(4) + x/2, S(2), x) == (None, None)
assert check_param(Rational(3, 2), S(4) + x, S(2), x) == (None, None)
assert check_param(S(4) + x, Rational(3, 2), S(2), x) == (None, None)
assert _nint_or_floor(16, 10) == 2
assert _odd(1) == (not _even(1)) == True
assert _odd(0) == (not _even(0)) == False
assert _remove_gcd(2, 4, 6) == (1, 2, 3)
raises(TypeError, lambda: _remove_gcd((2, 4, 6)))
assert sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11) == \
(11, 1, 5)
# it's ok if these pass some day when the solvers are implemented
raises(NotImplementedError, lambda: diophantine(x**2 + y**2 + x*y + 2*y*z - 12))
raises(NotImplementedError, lambda: diophantine(x**3 + y**2))
assert diop_quadratic(x**2 + y**2 - 1**2 - 3**4) == \
set([(-9, -1), (-9, 1), (-1, -9), (-1, 9), (1, -9), (1, 9), (9, -1), (9, 1)])
def test_holzer():
# if the input is good, don't let it diverge in holzer()
# (but see test_fail_holzer below)
assert holzer(2, 7, 13, 4, 79, 23) == (2, 7, 13)
# None in uv condition met; solution is not Holzer reduced
# so this will hopefully change but is here for coverage
assert holzer(2, 6, 2, 1, 1, 10) == (2, 6, 2)
raises(ValueError, lambda: holzer(2, 7, 14, 4, 79, 23))
@XFAIL
def test_fail_holzer():
eq = lambda x, y, z: a*x**2 + b*y**2 - c*z**2
a, b, c = 4, 79, 23
x, y, z = xyz = 26, 1, 11
X, Y, Z = ans = 2, 7, 13
assert eq(*xyz) == 0
assert eq(*ans) == 0
assert max(a*x**2, b*y**2, c*z**2) <= a*b*c
assert max(a*X**2, b*Y**2, c*Z**2) <= a*b*c
h = holzer(x, y, z, a, b, c)
assert h == ans # it would be nice to get the smaller soln
def test_issue_9539():
assert diophantine(6*w + 9*y + 20*x - z) == \
set([(t_0, t_1, t_1 + t_2, 6*t_0 + 29*t_1 + 9*t_2)])
def test_issue_8943():
assert diophantine(
(3*(x**2 + y**2 + z**2) - 14*(x*y + y*z + z*x))) == \
set([(0, 0, 0)])
def test_diop_sum_of_even_powers():
eq = x**4 + y**4 + z**4 - 2673
assert diop_solve(eq) == set([(3, 6, 6), (2, 4, 7)])
assert diop_general_sum_of_even_powers(eq, 2) == set(
[(3, 6, 6), (2, 4, 7)])
raises(NotImplementedError, lambda: diop_general_sum_of_even_powers(-eq, 2))
neg = symbols('neg', negative=True)
eq = x**4 + y**4 + neg**4 - 2673
assert diop_general_sum_of_even_powers(eq) == set([(-3, 6, 6)])
assert diophantine(x**4 + y**4 + 2) == set()
assert diop_general_sum_of_even_powers(x**4 + y**4 - 2, limit=0) == set()
def test_sum_of_squares_powers():
tru = set([
(0, 0, 1, 1, 11), (0, 0, 5, 7, 7), (0, 1, 3, 7, 8), (0, 1, 4, 5, 9),
(0, 3, 4, 7, 7), (0, 3, 5, 5, 8), (1, 1, 2, 6, 9), (1, 1, 6, 6, 7),
(1, 2, 3, 3, 10), (1, 3, 4, 4, 9), (1, 5, 5, 6, 6), (2, 2, 3, 5, 9),
(2, 3, 5, 6, 7), (3, 3, 4, 5, 8)])
eq = u**2 + v**2 + x**2 + y**2 + z**2 - 123
ans = diop_general_sum_of_squares(eq, oo) # allow oo to be used
assert len(ans) == 14
assert ans == tru
raises(ValueError, lambda: list(sum_of_squares(10, -1)))
assert list(sum_of_squares(-10, 2)) == []
assert list(sum_of_squares(2, 3)) == []
assert list(sum_of_squares(0, 3, True)) == [(0, 0, 0)]
assert list(sum_of_squares(0, 3)) == []
assert list(sum_of_squares(4, 1)) == [(2,)]
assert list(sum_of_squares(5, 1)) == []
assert list(sum_of_squares(50, 2)) == [(5, 5), (1, 7)]
assert list(sum_of_squares(11, 5, True)) == [
(1, 1, 1, 2, 2), (0, 0, 1, 1, 3)]
assert list(sum_of_squares(8, 8)) == [(1, 1, 1, 1, 1, 1, 1, 1)]
assert [len(list(sum_of_squares(i, 5, True))) for i in range(30)] == [
1, 1, 1, 1, 2,
2, 1, 1, 2, 2,
2, 2, 2, 3, 2,
1, 3, 3, 3, 3,
4, 3, 3, 2, 2,
4, 4, 4, 4, 5]
assert [len(list(sum_of_squares(i, 5))) for i in range(30)] == [
0, 0, 0, 0, 0,
1, 0, 0, 1, 0,
0, 1, 0, 1, 1,
0, 1, 1, 0, 1,
2, 1, 1, 1, 1,
1, 1, 1, 1, 3]
for i in range(30):
s1 = set(sum_of_squares(i, 5, True))
assert not s1 or all(sum(j**2 for j in t) == i for t in s1)
s2 = set(sum_of_squares(i, 5))
assert all(sum(j**2 for j in t) == i for t in s2)
raises(ValueError, lambda: list(sum_of_powers(2, -1, 1)))
raises(ValueError, lambda: list(sum_of_powers(2, 1, -1)))
assert list(sum_of_powers(-2, 3, 2)) == [(-1, -1)]
assert list(sum_of_powers(-2, 4, 2)) == []
assert list(sum_of_powers(2, 1, 1)) == [(2,)]
assert list(sum_of_powers(2, 1, 3, True)) == [(0, 0, 2), (0, 1, 1)]
assert list(sum_of_powers(5, 1, 2, True)) == [(0, 5), (1, 4), (2, 3)]
assert list(sum_of_powers(6, 2, 2)) == []
assert list(sum_of_powers(3**5, 3, 1)) == []
assert list(sum_of_powers(3**6, 3, 1)) == [(9,)] and (9**3 == 3**6)
assert list(sum_of_powers(2**1000, 5, 2)) == []
def test__can_do_sum_of_squares():
assert _can_do_sum_of_squares(3, -1) is False
assert _can_do_sum_of_squares(-3, 1) is False
assert _can_do_sum_of_squares(0, 1)
assert _can_do_sum_of_squares(4, 1)
assert _can_do_sum_of_squares(1, 2)
assert _can_do_sum_of_squares(2, 2)
assert _can_do_sum_of_squares(3, 2) is False
def test_diophantine_permute_sign():
from sympy.abc import a, b, c, d, e
eq = a**4 + b**4 - (2**4 + 3**4)
base_sol = set([(2, 3)])
assert diophantine(eq) == base_sol
complete_soln = set(signed_permutations(base_sol.pop()))
assert diophantine(eq, permute=True) == complete_soln
eq = a**2 + b**2 + c**2 + d**2 + e**2 - 234
assert len(diophantine(eq)) == 35
assert len(diophantine(eq, permute=True)) == 62000
soln = set([(-1, -1), (-1, 2), (1, -2), (1, 1)])
assert diophantine(10*x**2 + 12*x*y + 12*y**2 - 34, permute=True) == soln
@XFAIL
def test_not_implemented():
eq = x**2 + y**4 - 1**2 - 3**4
assert diophantine(eq, syms=[x, y]) == set([(9, 1), (1, 3)])
def test_issue_9538():
eq = x - 3*y + 2
assert diophantine(eq, syms=[y,x]) == set([(t_0, 3*t_0 - 2)])
raises(TypeError, lambda: diophantine(eq, syms=set([y,x])))
def test_ternary_quadratic():
# solution with 3 parameters
s = diophantine(2*x**2 + y**2 - 2*z**2)
p, q, r = ordered(S(s).free_symbols)
assert s == {(
p**2 - 2*q**2,
-2*p**2 + 4*p*q - 4*p*r - 4*q**2,
p**2 - 4*p*q + 2*q**2 - 4*q*r)}
# solution with Mul in solution
s = diophantine(x**2 + 2*y**2 - 2*z**2)
assert s == {(4*p*q, p**2 - 2*q**2, p**2 + 2*q**2)}
# solution with no Mul in solution
s = diophantine(2*x**2 + 2*y**2 - z**2)
assert s == {(2*p**2 - q**2, -2*p**2 + 4*p*q - q**2,
4*p**2 - 4*p*q + 2*q**2)}
# reduced form when parametrized
s = diophantine(3*x**2 + 72*y**2 - 27*z**2)
assert s == {(24*p**2 - 9*q**2, 6*p*q, 8*p**2 + 3*q**2)}
assert parametrize_ternary_quadratic(
3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z) == (
2*p**2 - 2*p*q - q**2, 2*p**2 + 2*p*q - q**2, 2*p**2 -
2*p*q + 3*q**2)
assert parametrize_ternary_quadratic(
124*x**2 - 30*y**2 - 7729*z**2) == (
-1410*p**2 - 363263*q**2, 2700*p**2 + 30916*p*q -
695610*q**2, -60*p**2 + 5400*p*q + 15458*q**2)
|
py | 1a4878eb8f1347bec0cc9f26f5a0a0ed67aa9c82 | """
A script that prints information about the export table of a PE file.
For each exported function it prints the function name and its RVA, and also
prints the total number of exported functions.
Usage example:
python get_export_info.py d:/file.exe
"""
import sys
import pefile
try:
file_path = sys.argv[1]
except IndexError:
    print('No file specified.')
sys.exit(0)
try:
pe = pefile.PE(file_path)
except FileNotFoundError:
    print('Cannot find the specified file:', sys.argv[1])
sys.exit(0)
except pefile.PEFormatError:
    print('File', sys.argv[1], 'is not a Windows PE file.')
sys.exit(0)
if hasattr(pe, 'DIRECTORY_ENTRY_EXPORT'):
    # Access the export directory only after confirming it exists, otherwise
    # files without an export section would raise AttributeError.
    print('Library:', pe.DIRECTORY_ENTRY_EXPORT.name.decode('utf-8'))
    for export_entry in pe.DIRECTORY_ENTRY_EXPORT.symbols:
        print('\t' + export_entry.name.decode('utf-8'))
        print('\t\tOrdinal:', str(hex(export_entry.ordinal)))
        print('\t\tFunction RVA:', str(hex(export_entry.address)))
    print('A total of', len(pe.DIRECTORY_ENTRY_EXPORT.symbols), 'functions are exported.')
else:
    print('File', sys.argv[1], 'does not contain an export section.')
|
py | 1a4879e4524366ac435bcdf0ab15881e23c5432e | class Solution(object):
def scoreOfParentheses(self, S):
"""
:type S: str
:rtype: int
"""
        # Stack-based scoring: push a marker (-1) for every '(' and, on ')',
        # pop and sum the scores until the matching marker is found.
        ss = []
        for i in S:
            if i == '(':
                ss.append(-1)
            else:
                cur = 0
                while ss[-1] != -1:
                    cur += ss.pop()
                ss.pop()  # discard the matching '(' marker
                if cur == 0:
                    ss.append(1)        # "()" scores 1
                else:
                    ss.append(2 * cur)  # "(A)" scores 2 * A
        return sum(ss)
def run():
s = Solution()
st = "((())(()))()"
    print(s.scoreOfParentheses(st))
|
py | 1a487a9265c40ce57801a89e731e00aa93645f75 | # Generated by Django 2.2.1 on 2020-05-11 12:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('PropelRapp', '0010_auto_20200507_0705'),
]
operations = [
migrations.RenameModel(
old_name='RoleDetails',
new_name='Roledetail',
),
migrations.AlterModelTable(
name='roledetail',
table='Roledetail',
),
]
|
py | 1a487ae01519e9e92a3878b944036c3031d105be | import time
import cv2
class DetectHumanMovement(object):
def __init__(self):
self.faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
self.handCascade = cv2.CascadeClassifier("haarcascade_hand_default.xml")
self.video_capture = cv2.VideoCapture(0)
while True:
if not self.video_capture.isOpened():
print('Unable to load camera.')
time.sleep(2)
continue
else:
break
    # Captures a frame; intended to be called from the main game loop
def capture_gray_image(self):
retval, frame = self.video_capture.read()
self.frame = frame
if not retval:
            raise Exception('Oops! Capture image failed.')
# convert to gray scale
self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
return self.gray
# detect all faces in gray image
def detect_faces(self):
faces = self.faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(35, 35)
)
return faces
# detect all fist
def detect_fists(self):
fists = self.handCascade.detectMultiScale(
self.gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(35, 35)
)
return fists
"""
Define Face Horizontal Orientation (Left or Right)
window (ex = 0 to WIDTH)
---------------x'-------
-----x'-----------------
x' <- 500 and
x' <- 300
p = x' - (WIDTH / 2)
"""
def face_laterality_orientation(self, face, width):
(x, y, w, h) = face
orientation = int((x + (w / 2)) - (width / 2))
return orientation
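    # Worked example (hypothetical 640-px-wide frame): a face at x=468 with
    # w=64 has its centre at x + w/2 = 500, so orientation = 500 - 320 = +180
    # (positive means the right half); a centre at 300 gives -20 (left half).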
"""
Check Fist to shoot missile
"""
def fist_check(self):
fists = self.detect_fists()
return len(fists)
|
py | 1a487b0a727b391bdd4f5eff7165216753a3f543 | import os
import time
import torch
import torch.nn as nn
from model.utils.general import init_dir, get_logger
class BaseModel(object):
"""Generic class for our model
Usage:
1. init
2. build_train() or build_pred()
3. save and restore
4. train and evaluate
"""
# 1. init
def __init__(self, config, dir_output):
"""Defines self._config
Args:
config: (Config instance) class with hyper parameters, from "model.json"
dir_output: output dir
"""
self._config = config
self._dir_output = dir_output
self._init_relative_path(dir_output)
self.logger = get_logger(dir_output + "model.log")
def _init_relative_path(self, dir_output):
# init parent dir
init_dir(dir_output)
# 1. init child dir
# check dir one last time
self._dir_model = dir_output + "model_weights/"
init_dir(self._dir_model)
# 2. define model path
self._model_path = self._dir_model + "model.cpkt"
# 2. build
def build_train(self, config=None):
"""To overwrite with model-specific logic
This logic must define
- self.model_name
- self.loss
- self.lr
- etc.
Args:
config: from "training.json" and "model.json"
"""
self.logger.info("- Building model...")
self._init_model(config.model_name, config.device)
self._init_optimizer(config.lr_method, config.lr_init)
self._init_scheduler(config.lr_scheduler)
self._init_criterion(config.criterion_method)
self.logger.info("- done.")
def build_pred(self, config=None):
"""Similar to build_train but no need to define train_op
Args:
config: from "model.json"
"""
self.logger.info("- Building model...")
self._init_model(config.model_name, config.device)
self.logger.info("- done.")
def _init_model(self, model_name="CNN", device="cpu"):
self.logger.info(" - " + model_name)
self.logger.info(" - " + device)
self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
self.model = self.getModel(model_name)
self.model = self.model.to(self.device)
def _init_optimizer(self, lr_method="adam", lr=1e-3):
"""Defines self.optimizer that performs an update on a batch
Args:
lr_method: (string) sgd method, for example "adam"
lr: init learning rate (initial value)
"""
# 1. optimizer
_lr_m = lr_method.lower() # lower to make sure
print(" - " + _lr_m)
self.optimizer = self.getOptimizer(_lr_m, lr)
def _init_scheduler(self, lr_scheduler="CosineAnnealingLR"):
"""Defines self.scheduler that performs an update on a batch
Args:
lr_scheduler: (string) learning rate schedule method, for example "CosineAnnealingLR"
"""
# 2. scheduler
print(" - lr_scheduler " + lr_scheduler)
self.scheduler = self.getLearningRateScheduler(lr_scheduler)
def _init_criterion(self, criterion_method="CrossEntropyLoss"):
"""Defines self.criterion that performs an update on a batch
Args:
criterion_method: (string) criterion method, for example "CrossEntropyLoss"
"""
# 3. criterion
print(" - " + criterion_method)
self.criterion = self.getCriterion(criterion_method)
# ! MUST OVERWRITE
def getModel(self, model_name="CNN"):
"""return your Model
Args:
model_name: String, from "model.json"
Returns:
your model that inherits from torch.nn
"""
raise NotImplementedError("return your model ({}) that inherits from torch.nn".format(model_name))
def getOptimizer(self, lr_method="adam", lr=1e-3):
if lr_method == 'adam':
return torch.optim.Adam(self.model.parameters(), lr=lr)
elif lr_method == 'adamax':
return torch.optim.Adamax(self.model.parameters(), lr=lr)
elif lr_method == 'sgd':
return torch.optim.SGD(self.model.parameters(), lr=lr)
else:
raise NotImplementedError("Unknown Optimizer {}".format(lr_method))
def getLearningRateScheduler(self, lr_scheduler="CosineAnnealingLR"):
if lr_scheduler == "CosineAnnealingLR":
return torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=5, eta_min=4e-08)
else:
raise NotImplementedError("Unknown Learning Rate Scheduler {}".format(lr_scheduler))
def getCriterion(self, criterion_method="CrossEntropyLoss"):
if criterion_method == 'CrossEntropyLoss':
return torch.nn.CrossEntropyLoss()
elif criterion_method == 'MSELoss':
return torch.nn.MSELoss()
elif criterion_method == 'BCEWithLogitsLoss':
return torch.nn.BCEWithLogitsLoss()
else:
raise NotImplementedError("Unknown Criterion Method {}".format(criterion_method))
# 3. save and restore
def auto_restore(self):
if os.path.exists(self._model_path) and os.path.isfile(self._model_path):
self.restore()
def restore(self, model_path=None, map_location='cpu'):
"""Reload weights into session
Args:
model_path: weights path "model_weights/model.cpkt"
map_location: 'cpu' or 'gpu:0'
"""
self.logger.info("- Reloading the latest trained model...")
if model_path == None:
self.model.load_state_dict(torch.load(self._model_path, map_location=map_location))
else:
self.model.load_state_dict(torch.load(model_path, map_location=map_location))
def save(self):
"""Saves model"""
self.logger.info("- Saving model...")
torch.save(self.model.state_dict(), self._model_path)
self.logger.info("- Saved model in {}".format(self._dir_model))
# 4. train and evaluate
def train(self, config, train_set, val_set, lr_schedule, path_label):
"""Global training procedure
        Calls self._run_train_epoch and saves weights if the score improves.
        All the epoch logic, including the lr_schedule update, must be done in
        self._run_train_epoch
Args:
config: Config instance contains params as attributes
train_set: Dataset instance
val_set: Dataset instance
lr_schedule: LRSchedule instance that takes care of learning proc
path_label: dataframe
Returns:
best_score: (float)
"""
best_score = None
for epoch in range(config.n_epochs):
# logging
tic = time.time()
self.logger.info("Epoch {:}/{:}".format(epoch + 1, config.n_epochs))
# epoch
score = self._run_train_epoch(config, train_set, val_set, epoch, lr_schedule, path_label)
# save weights if we have new best score on eval
if best_score is None or score >= best_score: # abs(score-0.5) <= abs(best_score-0.5):
best_score = score
self.logger.info("- New best score ({:04.2f})!".format(best_score))
self.save()
if lr_schedule.stop_training:
self.logger.info("- Early Stopping.")
break
# logging
toc = time.time()
self.logger.info("- Elapsed time: {:04.2f}, learning rate: {:04.5f}".format(toc - tic, lr_schedule.lr))
return best_score
def evaluate(self, config, test_set, path_label):
"""Evaluates model on test set
Calls method run_evaluate on test_set and takes care of logging
Args:
config: Config
test_set: instance of class Dataset
path_label: dataframe
Return:
scores: (dict) scores["acc"] = 0.85 for instance
"""
self.logger.info("- Evaluating...")
scores = self._run_evaluate_epoch(config, test_set, path_label) # evaluate
msg = " ... ".join([" {} is {:04.2f} ".format(k, v) for k, v in scores.items()])
self.logger.info("- Eval: {}".format(msg))
return scores
def _auto_backward(self, loss):
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ! MUST OVERWRITE
    def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule, path_label):
"""Model_specific method to overwrite
Performs an epoch of training
Args:
config: Config
train_set: Dataset instance
val_set: Dataset instance
epoch: (int) id of the epoch, starting at 0
lr_schedule: LRSchedule instance that takes care of learning proc
Returns:
score: (float) model will select weights that achieve the highest score
Alert:
you can use the method below to simplify your code
_auto_backward(self, loss)
"""
raise NotImplementedError("Performs an epoch of training")
# ! MUST OVERWRITE
    def _run_evaluate_epoch(self, config, test_set, path_label):
        """Model-specific method to overwrite
        Performs an epoch of evaluation
        Args:
            config: Config
            test_set: Dataset instance
            path_label: dataframe
Returns:
scores: (dict) scores["acc"] = 0.85 for instance
"""
raise NotImplementedError("Performs an epoch of evaluation")
|
py | 1a487cd816c7bf63e02e041c9a8e97ecab13ef7f | if __name__ == "__main__":
#? string.split(separator, maxsplit)
x, y = [set(input().split())for _ in range(4)][1::2]
print(len(x-y))
|
py | 1a487d0006d9e6417e6ebd0db5fbbaa7b9c90239 | import requests
base_url = 'https://api.ng.termii.com/api'
class Token:
def __init__(self, api_key):
self.api_key = api_key
def send_token(self, message_type, to, from_id, channel, pin_attempts, pin_time_to_live,
pin_length, pin_placeholder, message_text, pin_type):
url = base_url + f'/sms/otp/send'
payload = {
"api_key": self.api_key,
"message_type": message_type,
"to": to,
"from": from_id,
"channel": channel,
"pin_attempts": int(pin_attempts),
"pin_time_to_live": pin_time_to_live,
"pin_length": pin_length,
"pin_placeholder": pin_placeholder,
"message_text": message_text,
"pin_type": pin_type
}
headers = {
'Content-Type': 'application/json',
}
response = requests.post(url, headers=headers, json=payload)
return response.json()
def voice_token(self, phone_number, pin_attempts, pin_time_to_live,
pin_length):
url = base_url + f'/sms/otp/send/voice'
payload = {
"api_key": self.api_key,
"phone_number": phone_number,
"pin_attempts": int(pin_attempts),
"pin_time_to_live": int(pin_time_to_live),
"pin_length": int(pin_length),
}
headers = {
'Content-Type': 'application/json',
}
response = requests.post(url, headers=headers, json=payload)
return response.json()
def voice_call(self, phone_number, code):
url = base_url + f'/sms/otp/call'
payload = {
"api_key": self.api_key,
"phone_number": phone_number,
"code": int(code)
}
headers = {
'Content-Type': 'application/json',
}
response = requests.post(url, headers=headers, json=payload)
return response.json()
def in_app_token(self, pin_type, phone_number, pin_attempts, pin_time_to_live, pin_length):
url = base_url + f'/sms/otp/generate'
payload = {
"api_key": self.api_key,
"pin_type": pin_type,
"phone_number": phone_number,
"pin_attempts": pin_attempts,
"pin_time_to_live": pin_time_to_live,
"pin_length": pin_length
}
headers = {
'Content-Type': 'application/json',
}
response = requests.post(url, headers=headers, json=payload)
        return response.json()
def verify_token(self, pin_id, pin):
url = base_url + f'/sms/otp/verify'
payload = {
"api_key": self.api_key,
"pin_id": pin_id,
"pin": pin,
}
headers = {
'Content-Type': 'application/json',
}
response = requests.post(url, headers=headers, json=payload)
return response.json()
|
py | 1a487dc3c6b8ca647c293135ee9865b8885734a7 | # -*- encoding: utf-8 -*-
from django.db import models
class Tweet(models.Model):
id = models.AutoField(primary_key=True)
username = models.CharField(max_length=200)
short_description = models.TextField()
predict = models.CharField(max_length=200)
class Meta:
db_table = "tweets"
def __str__(self):
        return self.username
class User(models.Model):
id = models.AutoField(primary_key=True)
username = models.CharField(max_length=200)
followers = models.IntegerField()
followings = models.IntegerField()
favorites = models.IntegerField()
tweets_count = models.IntegerField()
profile_pic = models.CharField(max_length=200)
cover_pic = models.CharField(max_length=200)
wcloud_pic = models.CharField(max_length=200)
name = models.CharField(max_length=200)
bio = models.CharField(max_length=200)
location = models.CharField(max_length=200)
website = models.CharField(max_length=200)
join_at = models.CharField(max_length=200)
class Meta:
db_table = "users"
def __str__(self):
return self.username
|
py | 1a487defa522c9495b3cb0ba7589870688bdcbe6 | import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='yanchor', parent_name='mesh3d.colorbar', **kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'style'),
values=kwargs.pop('values', ['top', 'middle', 'bottom']),
**kwargs
)
|
py | 1a487e1863ac52fb0b4dc75165047e7671a47dfc | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from jinja2 import escape
from jinja2 import Markup
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from . import json
from ._compat import json_available
from .app import Flask
from .app import Request
from .app import Response
from .blueprints import Blueprint
from .config import Config
from .ctx import after_this_request
from .ctx import copy_current_request_context
from .ctx import has_app_context
from .ctx import has_request_context
from .globals import _app_ctx_stack
from .globals import _request_ctx_stack
from .globals import current_app
from .globals import g
from .globals import request
from .globals import session
from .helpers import flash
from .helpers import get_flashed_messages
from .helpers import get_template_attribute
from .helpers import make_response
from .helpers import safe_join
from .helpers import send_file
from .helpers import send_from_directory
from .helpers import stream_with_context
from .helpers import url_for
from .json import jsonify
from .signals import appcontext_popped
from .signals import appcontext_pushed
from .signals import appcontext_tearing_down
from .signals import before_render_template
from .signals import got_request_exception
from .signals import message_flashed
from .signals import request_finished
from .signals import request_started
from .signals import request_tearing_down
from .signals import signals_available
from .signals import template_rendered
from .templating import render_template
from .templating import render_template_string
__version__ = "1.1.2"
|
py | 1a487e6c872f2c6b07fb17af356a42c8d7cbc042 | from __future__ import print_function
import numpy as np
import pytest
def pytest_runtest_setup(item):
seed = np.random.randint(1000)
print("Seed used in np.random.seed(): %d" % seed)
np.random.seed(seed)
def pytest_addoption(parser):
parser.addoption(
"--block",
action="store",
default=True,
help="Whether the plotting should block execution."
)
@pytest.fixture
def block(request):
try:
return request.config.getoption("--block") not in "False,false,no,0".split(",")
except ValueError:
return True
|
py | 1a487e942f86f435d4e5cdc26c1a5035d545c23a | from rest_framework import status
from rest_framework.reverse import reverse
from tests.test_profile.test_quota.base_test_quota import BaseTestQuota
from tests.utils import check_data_in_dict
class TestApiQuotaCreate(BaseTestQuota):
def setUp(self):
super(TestApiQuotaCreate, self).setUp()
self.post_data = {
'name': "My new quota",
'attribute_definitions': [self.memory_attributes.first().id, self.cpu_attributes.first().id]
}
self.create_quota_url = reverse('api_quota_list_create')
def _create_quota(self):
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
check_data_in_dict(self, [self.post_data], [response.data])
def _create_quota_failed(self, status_error=status.HTTP_400_BAD_REQUEST):
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status_error)
def test_admin_post_quota(self):
self._create_quota()
def test_cannot_post_quota_with_existing_name(self):
self._create_quota()
self._create_quota_failed()
def test_customer_cannot_post_quota(self):
self.client.force_login(user=self.standard_user)
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_cannot_post_quota_when_logout(self):
self.client.logout()
response = self.client.post(self.create_quota_url, data=self.post_data,
content_type="application/json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
|
py | 1a487ed26be2d8be36067c8a5b0ac2c8ccaa6859 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import io
import datetime as dt
import asyncio
import discord
from urllib.parse import urljoin
import pyrebase
from discord import Message
from discord import Server
from discord import ChannelType
from discord.ext import commands
from discord.ext.commands import Command
from discord.ext.commands import Context
from cogs.utils import checks
from __main__ import send_cmd_help
import aiohttp
from cogs.utils.dataIO import dataIO
PATH = os.path.join('data', 'firebase')
JSON = os.path.join(PATH, 'settings.json')
SERVICE_KEY_JSON = os.path.join(PATH, "service_key.json")
APP_NAME = "Discord"
REQUIRED_SETTINGS = [
'SERVER_KEY',
'AUTH_DOMAIN',
'DATABASE_URL',
'STORAGE_BUCKET',
'SERVICE_ACCOUNT'
]
HELP_SETTINGS = 'Please set all settings.'
class Firebase:
"""Send activity of Discord using Google Analytics."""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = dataIO.load_json(JSON)
self._fbapp = None
@property
def fbapp(self):
"""Firebase application reference using pyrebase."""
if self._fbapp is None:
if not self.check_settings():
return None
config = {
"apiKey": self.settings["SERVER_KEY"],
"authDomain": self.settings["AUTH_DOMAIN"],
"databaseURL": self.settings["DATABASE_URL"],
"storageBucket": self.settings['STORAGE_BUCKET'],
"serviceAccount": self.settings['SERVICE_ACCOUNT']
}
self._fbapp = pyrebase.initialize_app(config)
return self._fbapp
def check_settings(self):
"""Check all settings set."""
for setting in REQUIRED_SETTINGS:
if setting not in self.settings:
return False
if not self.settings[setting]:
return False
return True
@checks.serverowner_or_permissions(manage_server=True)
@commands.group(pass_context=True)
async def setfirebase(self, ctx):
"""Set Firebase settings."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@setfirebase.command(name="servicekey", pass_context=True)
async def setfirebase_service_key(self, ctx):
"""Set Firebase Service key.
This is generated by the Firebase Console.
You can get it here:
https://console.firebase.google.com/project/_/settings/serviceaccounts/adminsdk
"""
TIMEOUT = 30.0
await self.bot.say(
"Please upload the Firebase service account key (json). "
"[Timeout: {} seconds]".format(TIMEOUT))
attach_msg = await self.bot.wait_for_message(
timeout=TIMEOUT,
author=ctx.message.author)
if attach_msg is None:
await self.bot.say("Operation time out.")
return
if not len(attach_msg.attachments):
await self.bot.say("Cannot find attachments.")
return
attach = attach_msg.attachments[0]
url = attach["url"]
async with aiohttp.get(url) as cred:
with open(SERVICE_KEY_JSON, "wb") as f:
f.write(await cred.read())
await self.bot.say(
"Attachment received and saved as {}".format(SERVICE_KEY_JSON))
self.settings['SERVICE_ACCOUNT'] = SERVICE_KEY_JSON
dataIO.save_json(JSON, self.settings)
# Delete uploaded attachment
await self.bot.delete_message(attach_msg)
@setfirebase.command(name="serverkey", pass_context=True)
async def setfirebase_server_key(self, ctx, key):
"""Set Firebase Cloud Messaging Server Key.
This is generated by the Firebase Console
You can get it here:
https://console.firebase.google.com/project/_/settings/cloudmessaging
"""
self.settings["SERVER_KEY"] = key
dataIO.save_json(JSON, self.settings)
await self.bot.say("Saved Firebase Cloud Messaging Server Key.")
await self.bot.delete_message(ctx.message)
@setfirebase.command(name="authdomain", pass_context=True)
async def setfirebase_auth_domain(self, ctx, domain):
"""Set Auth Domain.
This is the URL in this format:
projectid.firebaseapp.com
"""
self.settings["AUTH_DOMAIN"] = domain
dataIO.save_json(JSON, self.settings)
await self.bot.say("Saved Firebase Auth Domain.")
await self.bot.delete_message(ctx.message)
@setfirebase.command(name="databaseurl", pass_context=True)
async def setfirebase_database_url(self, ctx, url):
"""Set Database URL.
This is the database URL in this format:
https://projectid.firebaseio.com
"""
self.settings["DATABASE_URL"] = url
dataIO.save_json(JSON, self.settings)
await self.bot.say("Saved Firebase Database URL.")
await self.bot.delete_message(ctx.message)
@setfirebase.command(name="storagebucket", pass_context=True)
async def setfirebase_storage_bucket(self, ctx, domain):
"""Set Storage Bucket.
This is the database URL in this format:
projectid.appspot.com
"""
self.settings["STORAGE_BUCKET"] = domain
dataIO.save_json(JSON, self.settings)
await self.bot.say("Saved Firebase Storage bucket.")
await self.bot.delete_message(ctx.message)
@checks.serverowner_or_permissions(manage_server=True)
@commands.group(pass_context=True)
async def firebase(self, ctx):
"""Run Firebase commands."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@firebase.command(name="status", pass_context=True)
async def firebase_status(self, ctx):
"""Show Firebase settings status."""
if not self.check_settings():
await self.bot.say("You are missing some settings.")
em = discord.Embed(title="Firebase Settings")
for setting in REQUIRED_SETTINGS:
data_key = setting
data_value = "--"
if setting in self.settings:
data_value = self.settings[setting]
em.add_field(name=data_key, value=data_value)
await self.bot.send_message(ctx.message.author, embed=em)
await self.bot.say("Firebase settings have been sent as DM.")
@firebase.command(name="toggle", pass_context=True)
async def firebase_toggle(self, ctx):
"""Toggle server on/off."""
server = ctx.message.server
if "SERVERS" not in self.settings:
self.settings["SERVERS"] = {}
if server.id not in self.settings["SERVERS"]:
self.settings["SERVERS"][server.id] = False
is_on = self.settings["SERVERS"][server.id]
self.settings["SERVERS"][server.id] = not is_on
await self.bot.say(
"Firebase monitoring for this server set to {}.".format(
self.settings["SERVERS"][server.id]))
dataIO.save_json(JSON, self.settings)
@firebase.command(name="data", pass_context=True)
async def firebase_data(self, ctx, *, msg):
author = ctx.message.author
data = {
"author": author.display_name,
"author_id": author.id,
"message": msg
}
db = self.fbapp.database()
db.child("users").push(data)
async def on_message(self, msg: Message):
"""Track on message."""
author = msg.author
server = msg.server
# check settings are set
if not self.check_settings():
return
# check server is tracked
tracking_server = False
try:
tracking_server = self.settings["SERVERS"][server.id]
except KeyError:
return
if not tracking_server:
return
data = {
"author": author.display_name,
"author_id": author.id,
"message": msg.content,
"datetime": dt.datetime.utcnow().isoformat()
}
db = self.fbapp.database()
db.child("servers").child(server.id).push(data)
def check_folder():
"""Check folder."""
if not os.path.exists(PATH):
os.makedirs(PATH)
def check_file():
"""Check files."""
defaults = {}
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, defaults)
def setup(bot):
"""Setup bot."""
check_folder()
check_file()
n = Firebase(bot)
bot.add_cog(n)
|
py | 1a487fe69a051047dfcc8e878385cb376c7eb914 | """Tests for transformers.padding.py."""
import numpy as np
import pytest
class TestPadder2d:
@pytest.fixture
def padder_cls(self):
from dstoolbox.transformers import Padder2d
return Padder2d
@pytest.fixture
def padder(self, padder_cls):
return padder_cls(max_len=4, pad_value=55, dtype=np.int64)
@pytest.fixture
def data(self):
return [
[],
[0, 1, 2],
[10, 11, 12, 13, 14, 15],
[100, 101, 102, 103],
]
@pytest.fixture
def expected(self):
return np.asarray([
[55, 55, 55, 55],
[0, 1, 2, 55],
[10, 11, 12, 13],
[100, 101, 102, 103]
])
def test_fit_and_transform_works(self, padder, data, expected):
result = padder.fit(data).transform(data)
assert np.allclose(result, expected)
assert result.dtype == np.int64
assert isinstance(result, np.ndarray)
def test_fit_transform_works(self, padder, data, expected):
result = padder.fit_transform(data)
assert np.allclose(result, expected)
assert result.dtype == np.int64
assert isinstance(result, np.ndarray)
@pytest.mark.parametrize('max_len', [1, 2, 3])
def test_other_max_len(self, padder_cls, data, expected, max_len):
padder = padder_cls(max_len=max_len, pad_value=55, dtype=np.int64)
result = padder.fit_transform(data)
assert np.allclose(result, expected[:, :max_len])
assert result.dtype == np.int64
def test_other_pad_value(self, padder_cls, data, expected):
padder = padder_cls(max_len=4, pad_value=-999, dtype=np.int64)
result = padder.fit_transform(data)
expected[expected == 55] = -999
assert np.allclose(result, expected)
assert result.dtype == np.int64
def test_other_dtype(self, padder_cls, data, expected):
padder = padder_cls(max_len=4, pad_value=55, dtype=np.float16)
result = padder.fit_transform(data)
assert np.allclose(result, expected)
assert result.dtype == np.float16
class TestPadder3d:
@pytest.fixture
def padder_cls(self):
from dstoolbox.transformers import Padder3d
return Padder3d
@pytest.fixture
def padder(self, padder_cls):
return padder_cls(max_size=(4, 2), pad_value=55, dtype=np.int64)
@pytest.fixture
def data(self):
return [
[],
[[0, 0], [1, 1, 1], [2]],
[[10], [], [12, 12, 12], [13], [], [15]],
[[100], [101], [102], [103, 104, 105]],
]
@pytest.fixture
def expected(self):
return np.asarray([
[[55, 55], [55, 55], [55, 55], [55, 55]],
[[0, 0], [1, 1], [2, 55], [55, 55]],
[[10, 55], [55, 55], [12, 12], [13, 55]],
[[100, 55], [101, 55], [102, 55], [103, 104]],
])
def test_fit_and_transform_works(self, padder, data, expected):
result = padder.fit(data).transform(data)
assert np.allclose(result, expected)
assert result.dtype == np.int64
assert isinstance(result, np.ndarray)
def test_fit_transform_works(self, padder, data, expected):
result = padder.fit_transform(data)
assert np.allclose(result, expected)
assert result.dtype == np.int64
assert isinstance(result, np.ndarray)
@pytest.mark.parametrize('max_size_0', [1, 2, 3])
def test_max_size_0(self, padder_cls, data, expected, max_size_0):
padder = padder_cls(
max_size=(max_size_0, 2), pad_value=55, dtype=np.int64)
result = padder.fit_transform(data)
assert np.allclose(result, expected[:, :max_size_0])
assert result.dtype == np.int64
def test_max_size_1(self, padder_cls, data, expected):
padder = padder_cls(
max_size=(4, 1), pad_value=55, dtype=np.int64)
result = padder.fit_transform(data)
assert np.allclose(result, expected[:, :, :1])
assert result.dtype == np.int64
def test_other_pad_value(self, padder_cls, data, expected):
padder = padder_cls(
max_size=(4, 2), pad_value=-999, dtype=np.int64)
result = padder.fit_transform(data)
expected[expected == 55] = -999
assert np.allclose(result, expected)
assert result.dtype == np.int64
def test_other_dtype(self, padder_cls, data, expected):
padder = padder_cls(
max_size=(4, 2), pad_value=55, dtype=np.float16)
result = padder.fit_transform(data)
assert np.allclose(result, expected)
assert result.dtype == np.float16
|
py | 1a4880dbac82a82bce039a0e556a907dcf7f05fe | class ClusteringTypes:
type = "type"
params = "params"
rmsd = "rmsd"
contactMap = "contactMap"
lastSnapshot = "lastSnapshot"
null = "null"
MSMClustering = "MSM"
thresholdCalculator = "thresholdCalculator"
ligandResname = "ligandResname"
ligandResnum = "ligandResnum"
ligandChain = "ligandChain"
alternativeStructure = "alternativeStructure"
contactThresholdDistance = "contactThresholdDistance"
nclusters = "nclusters"
similarityEvaluator = "similarityEvaluator"
differenceDistance = "differenceDistance"
Jaccard = "Jaccard"
correlation = "correlation"
symmetries = "symmetries"
tica = "tica"
atom_Ids = "atom_Ids"
writeCA = "writeCA"
sidechains = "sidechains"
tica_lagtime = "tica_lagtime"
tica_nICs = "tica_nICs"
tica_kinetic_map = "tica_kinetic_map"
tica_commute_map = "tica_commute_map"
class ThresholdCalculator:
type = "type"
params = "params"
heaviside = "heaviside"
constant = "constant"
class ThresholdCalculatorParams:
conditions = "conditions"
values = "values"
value = "value"
class DensityCalculator:
type = "type"
params = "params"
heaviside = "heaviside"
null = "null"
constant = "constant"
continuous = "continuous"
exitContinuous = "exitContinuous"
class DensityCalculatorParams:
conditions = "conditions"
values = "values"
class StringSpawningTypes:
type = "type"
independent = "independent"
independentMetric = "independentMetric"
sameWeight = "sameWeight"
inverselyProportional = "inverselyProportional"
epsilon = "epsilon"
fast = "FAST"
simulatedAnnealing = "simulatedAnnealing"
variableEpsilon = "variableEpsilon"
UCB = "UCB"
REAP = "REAP"
null = "null"
ProbabilityMSMCalculator = "ProbabilityMSM"
MetastabilityMSMCalculator = "MetastabilityMSM"
UncertaintyMSMCalculator = "UncertaintyMSM"
IndependentMSMCalculator = "IndependentMSM"
class SpawningParams:
params = "params"
epsilon = "epsilon"
temperature = "T"
threshold = "threshold"
report_filename = "reportFilename"
report_col = "metricColumnInReport"
minValue = "min"
maxValue = "max"
condition = "condition"
# New parameters for variable epsilon(experimental)
varEpsilonType = "varEpsilonType"
maxEpsilon = "maxEpsilon"
minEpsilon = "minEpsilon"
variationWindow = "variationWindow" # Last epoch of variable epsilon,if
# current epoch > than variation Window, set epsilon to minEpsilon
maxEpsilonWindow = "maxEpsilonWindow"
period = "period" # Only useful for periodic epsilon modes
density = "density"
metricWeights = "metricWeights"
linear = "linear"
boltzmann = "boltzmann"
alpha = "alpha"
nclusters = "n"
metricsInd = "metricsInd"
lagtime = "lagtime"
minPos = "minPos"
class SpawningDensity:
values = "values"
conditions = "conditions"
class VariableEpsilonTypes:
linearVariation = "linearVariation"
contactsVariation = "contactsVariation"
class SimulationType:
type = "type"
pele = "pele"
md = "md"
test = "test"
class SimulationParams:
params = "params"
processors = "processors"
executable = "executable"
templetizedControlFile = "controlFile"
dataFolder = "data"
documentsFolder = "documents"
destination = "destination"
origin = "origin"
seed = "seed"
peleSteps = "peleSteps"
iterations = "iterations"
exitCondition = "exitCondition"
metricCol = "metricCol"
exitValue = "exitValue"
trajectories = "trajectories"
modeMovingBox = "modeMovingBox"
modeMovingBoxBinding = "binding"
modeMovingBoxUnBinding = "unbinding"
equilibrationMode = "equilibrationMode"
equilibrationLastSnapshot = "equilibrationLastSnapshot"
equilibrationSelect = "equilibrationSelect"
equilibrationCluster = "equilibrationCluster"
numberEquilibrationStructures = "numberEquilibrationStructures"
boxCenter = "boxCenter"
boxRadius = "boxRadius"
runEquilibration = "runEquilibration"
condition = "condition"
numTrajs = "numberTrajectories"
equilibrationLength = "equilibrationLength"
trajectoryName = "trajectoryName"
srun = "useSrun"
srunParameters = "srunParameters"
mpiParameters = "mpiParameters"
# params for MD
ligandCharge = "ligandCharge"
nonBondedCutoff = "nonBondedCutoff"
Temperature = "temperature"
runningPlatform = "runningPlatform"
minimizationIterations = "minimizationIterations"
repoterfreq = "reporterFrequency"
productionLength = "productionLength"
waterBoxSize = "WaterBoxSize"
trajsPerReplica = "trajectoriesPerReplica"
numReplicas = "numReplicas"
timeStep = "timeStep"
equilibrationLengthNVT = "equilibrationLengthNVT"
equilibrationLengthNPT = "equilibrationLengthNPT"
constraintsMin = "constraintsMinimization"
constraintsNVT = "constraintsNVT"
constraintsNPT = "constraintsNPT"
devicesPerTrajectory = "devicesPerTrajectory"
forcefield = "forcefield"
customparamspath = "customparamspath"
maxDevicesPerReplica = "maxDevicesPerReplica"
format = "format"
ligandName = "ligandName"
class ExitConditionType:
type = "type"
metric = "metric"
clustering = "clustering"
metricMultipleTrajs = "metricMultipleTrajectories"
class ControlFileParams:
generalParams = "generalParams"
spawningBlockname = "spawning"
simulationBlockname = "simulation"
clusteringBlockname = "clustering"
class GeneralParams:
restart = "restart"
outputPath = "outputPath"
initialStructures = "initialStructures"
debug = "debug"
writeAllClustering = "writeAllClusteringStructures"
nativeStructure = "nativeStructure"
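# Illustrative note (not part of the original module): these classes only map
# attribute names to the JSON keys of a control file, so a parsed control file
# (a plain dict, hypothetical here) is typically indexed as
#     controlFile[ControlFileParams.generalParams][GeneralParams.outputPath]
# to retrieve, for example, the output path configured for a run.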
|
py | 1a48811ae8752c5044870beb2801a82d07ff1f3e | import nltk
from nltk.corpus import cmudict
import curses
from curses.ascii import isdigit
import re
from nltk.probability import FreqDist
from app import db
import models
from random import randrange
d = cmudict.dict()
def numSylsInWord(word):
    if word.lower() in d:
        return [len(list(y for y in x if y[-1].isdigit())) for x in d[word.lower()]][0]
    # Fallback for words missing from cmudict: estimate syllables from vowel
    # groups so that countSyllables does not crash on a None return value.
    return max(1, len(re.findall(r"[aeiouy]+", word.lower())))
def isHaiku(potentialHaiku):
    # A haiku has 5 + 7 + 5 = 17 syllables in total
    return countSyllables(potentialHaiku) == 17
def countSyllables(potentialHaiku):
stripPunctuation = re.sub(ur"[^\w\d'\s]+",' ',potentialHaiku)
wordsInHaiku = stripPunctuation.split()
syllableCount = 0
for i in wordsInHaiku:
syllableCount += numSylsInWord(i)
return syllableCount
def inDatabase(firstWord):
container = []
from models import Unigram
unigrams = Unigram.query.filter(Unigram.word1 == firstWord)
for each in unigrams:
addWords = [each.word2 for unigram in xrange(each.count)]
container.append(addWords)
    return bool(container)
def generateHaiku(firstWord):
inDB = inDatabase(firstWord)
if inDB:
haiku = startGenerateLine(5, firstWord)
haiku += "\n"
haiku += startGenerateLine(7)
haiku += "\n"
haiku += startGenerateLine(5)
if not inDB:
firstWord = pickRandomWord(5)
haiku = generateHaiku(firstWord)
return haiku
def startGenerateLine(sylCount, startingWord= None):
if not startingWord:
startingWord = pickRandomWord(sylCount)
remainingSylCount = sylCount - countSyllables(startingWord)
line = buildLineList(remainingSylCount, [startingWord])
return " ".join(line)
def buildLineList(sylCount, wordsFromBefore):
from random import shuffle
if sylCount == 0:
return wordsFromBefore
lastWord = wordsFromBefore[-1]
possibilities = createPossibleWords(lastWord, sylCount)
for possibleWord in possibilities:
newWordsFromBefore = [word[:] for word in wordsFromBefore]
newWordsFromBefore.append(possibleWord)
newSyllableCount = sylCount - countSyllables(possibleWord)
result = buildLineList(newSyllableCount, newWordsFromBefore)
if result:
return result
return None
def pickRandomWord(reqSylCount):
from models import Unigram
lengthDB = Unigram.query.count()
    while True:
        randomNumPick = randrange(1, lengthDB)
        tryWord = Unigram.query.filter(Unigram.id == randomNumPick).first()
        # Guard against gaps in the id sequence before checking the syllable count
        if tryWord and countSyllables(tryWord.word1) <= reqSylCount:
            break
    return tryWord.word1
def createPossibleWords(lastWord, sylCount):
from random import shuffle
possibilities = grabPossibleWords(lastWord, sylCount)
shuffle(possibilities)
return possibilities
def grabPossibleWords(baseWord, reqSylCount):
from models import Unigram
listOfUnigrams = Unigram.query.filter(Unigram.word1 ==baseWord)
return filterPossibleWords(listOfUnigrams, reqSylCount)
def filterPossibleWords(unigrams, reqSylCount):
if reqSylCount == 1 or reqSylCount == 2:
filteredUnigrams = removePartOfSpeech(unigrams)
filteredUnigrams = removeBadWords(filteredUnigrams)
filteredWords = sylCountFilter(filteredUnigrams, reqSylCount)
return filteredWords
else:
filteredWords = sylCountFilter(unigrams, reqSylCount)
return filteredWords
def removePartOfSpeech(unigrams):
filteredUnigrams = [unigram for unigram in unigrams if identifyPartsOfSpeech(unigram.word2) not in ['IN', 'CC', 'DT']]
return filteredUnigrams
def removeBadWords(unigrams):
filteredUnigrams = [unigram for unigram in unigrams if unigram.word2 not in ['so','mr','oh','it','the', 'and', 'i', 'of', 'at', 'we', 'for', 'by', 'but', 'to', 'a', 'as', 'like', 'than', 'with', "i'm"]]
return filteredUnigrams
def sylCountFilter(unigrams, reqSylCount):
filteredWords = [unigram.word2 for unigram in unigrams if countSyllables(unigram.word2) <= reqSylCount]
return filteredWords
def identifyPartsOfSpeech(word):
cleanString = re.sub(ur"[^\w\d'\s]+",' ', word)
pos = nltk.word_tokenize(cleanString)
result = nltk.pos_tag(pos)
return result[0][1]
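# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the NLTK 'cmudict' corpus has been downloaded (nltk.download('cmudict'));
# generateHaiku() additionally needs the app's Unigram table to be populated.
#
#     print countSyllables("An old silent pond")          # e.g. 5 with cmudict
#     print isHaiku("An old silent pond / A frog jumps into the pond / "
#                   "Splash! Silence again.")             # True only for 17 syllables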
|
py | 1a48816c298f9631422c13aa96e9710ab638924d | from flask.ext.classy import FlaskView, route
from flask import render_template, redirect, url_for, flash
from flask_menu.classy import classy_menu_item
from flask_login import current_user, login_required
from Application.models import User
from Application.models import Project
from .forms import UserEditForm
from Application import db
from speaklater import make_lazy_string
@make_lazy_string
def account_text():
if current_user.is_authenticated:
return "Account ({})".format(current_user.fullname)
return "Account"
def show_menu():
return current_user.is_authenticated
class Profile(FlaskView):
route_base = '/profile'
@classy_menu_item('frontend-right.account', account_text, visible_when=show_menu, order=1)
@classy_menu_item('frontend-right.account.profile', 'My Profile', order=0)
@login_required
def index(self):
return redirect(url_for('.Profile:me'))
@login_required
@route('/me/')
def me(self):
user = current_user
projects = None
if user.projects.count():
projects = user.projects.order_by(Project.date_uploaded.desc())
following = False
if current_user.is_authenticated:
following = current_user.following.filter_by(zid=current_user.zid).count() != 0
return render_template(
'.profile/index.html',
user=user, following=following, projects=projects)
@route('/<string:user_id>/')
def user(self, user_id):
user = User.query.filter(User.zid == user_id).first_or_404()
projects = None
if user.projects.count():
projects = user.projects.order_by(Project.date_uploaded.desc())
following = False
if current_user.is_authenticated:
following = current_user.following.filter_by(zid=user_id).count() != 0
return render_template(
'.profile/index.html',
user=user,
following=following,
projects=projects)
@login_required
@route('/edit/', methods=['GET', 'POST'])
def edit(self):
form = UserEditForm(obj=current_user)
        if form.submit.data and form.validate_on_submit():
# update the user's details
current_user.website = form.website.data
current_user.github_username = form.github_username.data
current_user.email = form.email.data
current_user.about = form.about.data
db.session.add(current_user)
db.session.commit()
            flash('Successfully updated your details!', 'success')
return redirect(url_for('.Profile:me'))
return render_template(
".profile/edit_user.html",
is_form=True, form=form, user=current_user)
@login_required
@route('/follow/<string:user_id>/')
def follow(self, user_id):
following = current_user.following.filter_by(zid=user_id).count()
if user_id == current_user.zid:
flash("Error: you cannot follow yourself", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
if following:
flash("Error: you already follow this user", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
# Add follower relationship here
followee = User.query.get_or_404(user_id)
current_user.following.append(followee)
db.session.add(current_user)
db.session.commit()
flash("User followed successfully", 'success')
return redirect(url_for('.Profile:user', user_id=user_id))
@login_required
@route('/unfollow/<string:user_id>/')
def unfollow(self, user_id):
following = current_user.following.filter_by(zid=user_id).count()
if user_id == current_user.zid:
flash("Error: you cannot unfollow yourself", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
if following:
# Remove relationship here
followee = User.query.get_or_404(user_id)
current_user.following.remove(followee)
db.session.add(current_user)
db.session.commit()
flash("User unfollowed successfully", 'success')
return redirect(url_for('.Profile:user', user_id=user_id))
flash("Error: you don't follow this user", 'danger')
return redirect(url_for('.Profile:user', user_id=user_id))
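# Registration sketch (the application factory below is hypothetical; Flask-Classy
# attaches a FlaskView subclass to an app through its `register` class method):
#
#     from Application import create_app
#     app = create_app()
#     Profile.register(app)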
|
py | 1a4882b70d2a7191fa0e7d3c987b7fba63f9f647 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Shreyas Srish (@shrsr) <[email protected]>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_bd_dhcp_policy
short_description: Manage BD DHCP Policy in schema templates
description:
- Manage BD DHCP policies in schema templates on Cisco ACI Multi-Site.
author:
- Shreyas Srish (@shrsr)
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template to change.
type: str
required: yes
bd:
description:
- The name of the BD to manage.
type: str
required: yes
dhcp_policy:
description:
    - The DHCP Relay Policy.
type: str
aliases: [ name ]
version:
description:
- The version of DHCP Relay Policy.
type: int
dhcp_option_policy:
description:
- The DHCP Option Policy.
type: dict
suboptions:
name:
description:
- The name of the DHCP Option Policy.
type: str
required: yes
version:
description:
- The version of the DHCP Option Policy.
type: int
required: yes
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- This module can only be used on versions of MSO that are 3.1.1h or greater.
extends_documentation_fragment: cisco.mso.modules
'''
EXAMPLES = r'''
- name: Add a new DHCP policy to a BD
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
name: ansible_test
version: 1
dhcp_option_policy:
name: ansible_test_option
version: 1
state: present
delegate_to: localhost
- name: Remove a DHCP policy from a BD
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
name: ansible_test
version: 1
state: absent
delegate_to: localhost
- name: Query a specific BD DHCP Policy
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
name: ansible_test
state: query
delegate_to: localhost
register: query_result
- name: Query all BD DHCP Policies
cisco.mso.mso_schema_template_bd_dhcp_policy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.mso.plugins.module_utils.mso import MSOModule, mso_argument_spec, mso_dhcp_option_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
bd=dict(type='str', required=True),
dhcp_policy=dict(type='str', aliases=['name']),
version=dict(type='int'),
dhcp_option_policy=dict(type='dict', options=mso_dhcp_option_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dhcp_policy']],
['state', 'present', ['dhcp_policy', 'version']],
],
)
schema = module.params.get('schema')
template = module.params.get('template').replace(' ', '')
bd = module.params.get('bd')
dhcp_policy = module.params.get('dhcp_policy')
dhcp_option_policy = module.params.get('dhcp_option_policy')
version = module.params.get('version')
state = module.params.get('state')
mso = MSOModule(module)
# Get schema
schema_id, schema_path, schema_obj = mso.query_schema(schema)
# Get template
templates = [t.get('name') for t in schema_obj.get('templates')]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
# Get BD
bds = [b.get('name') for b in schema_obj.get('templates')[template_idx]['bds']]
if bd not in bds:
mso.fail_json(msg="Provided BD '{0}' does not exist. Existing BDs: {1}".format(bd, ', '.join(bds)))
bd_idx = bds.index(bd)
# Check if DHCP policy already exists
if dhcp_policy:
check_policy = mso.get_obj("policies/dhcp/relay", name=dhcp_policy, key="DhcpRelayPolicies")
if check_policy:
pass
else:
mso.fail_json(msg="DHCP policy '{dhcp_policy}' does not exist".format(dhcp_policy=dhcp_policy))
# Check if DHCP option policy already exists
if dhcp_option_policy:
check_option_policy = mso.get_obj("policies/dhcp/option", name=dhcp_option_policy.get('name'), key="DhcpRelayPolicies")
        if not check_option_policy:
mso.fail_json(msg="DHCP option policy '{dhcp_option_policy}' does not exist".format(dhcp_option_policy=dhcp_option_policy.get('name')))
# Get DHCP policies
dhcp_policies = [s.get('name') for s in schema_obj.get('templates')[template_idx]['bds'][bd_idx]['dhcpLabels']]
if dhcp_policy in dhcp_policies:
dhcp_idx = dhcp_policies.index(dhcp_policy)
# FIXME: Changes based on index are DANGEROUS
dhcp_policy_path = '/templates/{0}/bds/{1}/dhcpLabels/{2}'.format(template, bd, dhcp_idx)
mso.existing = schema_obj.get('templates')[template_idx]['bds'][bd_idx]['dhcpLabels'][dhcp_idx]
if state == 'query':
if dhcp_policy is None:
mso.existing = schema_obj.get('templates')[template_idx]['bds'][bd_idx]['dhcpLabels']
elif not mso.existing:
mso.fail_json(msg="DHCP policy not associated with the bd")
mso.exit_json()
dhcp_policy_paths = '/templates/{0}/bds/{1}/dhcpLabels'.format(template, bd)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=dhcp_policy_path))
elif state == 'present':
payload = dict(
name=dhcp_policy,
version=version,
dhcpOptionLabel=dhcp_option_policy,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=dhcp_policy_path, value=mso.sent))
else:
ops.append(dict(op='add', path=dhcp_policy_paths + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
py | 1a4882c3da83ba142ac0a2b7395c99424488cd6a | """Tests."""
from django.test import TestCase
from django.contrib.auth import get_user_model
from .. import NotificationError
from ..models import Notification
from ..signals import read, notify
class GeneralTestCase(TestCase):
"""Tests for General functionality."""
User = get_user_model()
@classmethod
def setUpTestData(cls):
"""Create Users."""
cls.user1 = cls.User.objects.create_user(
username='[email protected]', password='password'
)
cls.user2 = cls.User.objects.create(
username='[email protected]', password='password'
)
def test_to_json_without_extra_data(self):
"""
        If the extra_data argument is omitted,
        the default should be an empty dictionary.
"""
# Create notification
notification = Notification.objects.create(
source=self.user2, source_display_name='User 2',
recipient=self.user1, action='Notified',
category='General notification', obj=1, url='http://example.com',
short_description='Short Description', is_read=False,
)
self.assertEqual(
notification.to_json(),
{
'source': self.user2.id, 'source_display_name': 'User 2',
'recipient': self.user1.id, 'action': 'Notified',
'category': 'General notification', 'obj': 1,
'short_description': 'Short Description',
'extra_data': {}, 'channels': '',
'url': 'http://example.com', 'is_read': False
}
)
def test_to_json_with_extra_data(self):
"""Test to_json method with extra data."""
notification = Notification.objects.create(
source=self.user2, source_display_name='User 2',
recipient=self.user1, action='Notified',
category='General notification', obj=1, url='http://example.com',
short_description='Short Description', is_read=False,
extra_data={'hello': 'world'}
)
self.assertEqual(
notification.to_json(),
{
'source': self.user2.id, 'source_display_name': 'User 2',
'recipient': self.user1.id, 'action': 'Notified',
'category': 'General notification', 'obj': 1,
'short_description': 'Short Description', 'channels': '',
'url': 'http://example.com', 'extra_data': {'hello': 'world'},
'is_read': False,
}
)
class NotificationSignalTestCase(TestCase):
"""Tests for the notification signals."""
User = get_user_model()
@classmethod
def setUpTestData(cls):
"""Create Users."""
cls.user1 = cls.User.objects.create_user(
username='[email protected]', password='password'
)
cls.user2 = cls.User.objects.create(
username='[email protected]', password='password'
)
def test_user_cant_read_others_notifications(self):
"""A user should only be able to read THEIR notifications."""
# Create Notification for User2
notification = Notification.objects.create(
source=self.user1, source_display_name='User 1',
recipient=self.user2, action='Notified',
category='General notification', obj=1, url='http://example.com',
is_read=False
)
# Try and Read the notification as User1
self.assertRaises(
NotificationError,
read.send,
sender=self.__class__, notify_id=notification.id,
recipient=self.user1
)
def test_user_can_read_notifications(self):
"""A user can read their notification"""
# Create Notification for User1
notification = Notification.objects.create(
source=self.user2, source_display_name='User 2',
recipient=self.user1, action='Notified',
category='General notification', obj=1, url='http://example.com',
is_read=False
)
# Try and Read the notification as user1
read.send(
sender=self.__class__, notify_id=notification.id,
recipient=self.user1
)
notification.refresh_from_db()
self.assertEqual(notification.is_read, True)
def test_silent_notification(self):
"""Test Silent notifications."""
notify.send(
sender=self.__class__, source=self.user2,
source_display_name='User 2', recipient=self.user1,
action='Notified', category='Silent notification', obj=1,
url='http://example.com',
short_description='Short Description', is_read=False,
silent=True, channels=('console',)
)
notifications = Notification.objects.all()
self.assertEqual(notifications.count(), 0)
class JSONFieldTestCase(TestCase):
"""Test the Custom JSONField."""
User = get_user_model()
@classmethod
def setUpTestData(cls):
"""Create Users."""
cls.user1 = cls.User.objects.create_user(
username='[email protected]', password='password'
)
cls.user2 = cls.User.objects.create(
username='[email protected]', password='password'
)
def test_raise_exception(self):
"""
        Should raise an exception when we try to save objects
        that can't be serialized by the json module.
"""
kwargs = {
'sender': self.__class__, 'source': self.user2,
'source_display_name': 'User 2', 'recipient': self.user1,
'action': 'Notified', 'category': 'General notification',
'obj': 1, 'short_description': 'Short Description',
'url': 'http://example.com', 'is_read': False,
'extra_data': {'hello': lambda x: 'world'},
'channels': ('console',)
}
self.assertRaises(TypeError, notify.send, **kwargs)
def test_json_decode(self):
"""Should return a dictionary back."""
notify.send(
sender=self.__class__, source=self.user2,
source_display_name='User 2', recipient=self.user1,
action='Notified', category='Notification with extra data', obj=1,
url='http://example.com',
short_description='Short Description', is_read=False,
extra_data={'hello': 'world'}, channels=('console',)
)
notification = Notification.objects.last()
self.assertEqual(
notification.extra_data, {'hello': 'world'}
)
class TestListField(TestCase):
"""Tests for the list field."""
User = get_user_model()
@classmethod
def setUpTestData(cls):
"""Create Users."""
cls.user1 = cls.User.objects.create_user(
username='[email protected]', password='password'
)
cls.user2 = cls.User.objects.create(
username='[email protected]', password='password'
)
def test_should_return_list(self):
"""Should return a list of channels back."""
notify.send(
sender=self.__class__, source=self.user2,
source_display_name='User 2', recipient=self.user1,
action='Notified', category='Notification with extra data', obj=1,
url='http://example.com',
short_description='Short Description', is_read=False,
extra_data={'hello': 'world'}, channels=('console', 'console')
)
notification = Notification.objects.last()
self.assertEqual(
notification.to_json()['channels'], ['console', 'console']
)
|
py | 1a4882d0c3a3dd58a8bcc9ddf3b33d0e137edf0b | #
# POC FTP Browser for Enigma2
#
# for localized messages
from . import _
# Config
from Components.config import config, ConfigInteger, ConfigSubList, \
ConfigSubsection, ConfigText, ConfigPassword, ConfigYesNo
config.plugins.ftpbrowser = ConfigSubsection()
config.plugins.ftpbrowser.server = ConfigSubList()
config.plugins.ftpbrowser.servercount = ConfigInteger(0)
i = 0
append = config.plugins.ftpbrowser.server.append
while i < config.plugins.ftpbrowser.servercount.value:
newServer = ConfigSubsection()
append(newServer)
newServer.name = ConfigText("Name", fixed_size=False)
newServer.address = ConfigText("192.168.2.12", fixed_size=False)
newServer.username = ConfigText("root", fixed_size=False)
newServer.password = ConfigPassword("dreambox")
newServer.port = ConfigInteger(21, (1, 65535))
newServer.passive = ConfigYesNo(False)
i += 1
del newServer
del append, i
from FTPBrowser import FTPBrowser
from FTPServerManager import ftpserverFromURI
ftpbrowser = None
def createSingleton(session):
global ftpbrowser
if not ftpbrowser:
ftpbrowser = session.instantiateDialog(FTPBrowser)
return False
return True
def main(session, **kwargs):
createSingleton(session)
session.execDialog(ftpbrowser)
def filescan_chosen(session, item):
if item:
createSingleton(session)
ftpbrowser.connect(ftpserverFromURI(item[1], save = False))
session.execDialog(ftpbrowser)
def filescan_open_connected(res, items, session, **kwargs):
if res:
ftpbrowser.disconnect()
filescan_open(items, session, **kwargs)
def filescan_open(items, session, **kwargs):
if createSingleton(session) and ftpbrowser.ftpclient:
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
session.openWithCallback(
boundFunction(filescan_open_connected, items, session, **kwargs),
MessageBox,
_("There already is an active connection.\nDo you want to abort it?"),
type = MessageBox.TYPE_YESNO
)
return
Len = len(items)
if Len > 1:
from Screens.ChoiceBox import ChoiceBox
from Tools.BoundFunction import boundFunction
session.openWithCallback(
boundFunction(filescan_chosen, session),
ChoiceBox,
_("Which server do you want to connect to?"),
[(item, item) for item in items]
)
elif Len:
        filescan_chosen(session, (items[0], items[0]))
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
# Overwrite checkFile to detect remote files
class RemoteScanner(Scanner):
def checkFile(self, file):
return file.path.startswith("ftp://")
return [
RemoteScanner(
mimetypes = None,
paths_to_scan =
(
ScanPath(path = "", with_subdirs = False),
),
name = "Connect",
description = _("Connect to FTP..."),
openfnc = filescan_open,
),
]
def Plugins(**kwargs):
from Plugins.Plugin import PluginDescriptor
return [
PluginDescriptor(
name="FTPBrowser",
description = _("A basic FTP client"),
where = PluginDescriptor.WHERE_PLUGINMENU,
icon = "plugin.png",
fnc = main,
needsRestart = False
),
PluginDescriptor(
name = "FTPBrowser",
where = PluginDescriptor.WHERE_FILESCAN,
fnc = filescan,
needsRestart = False,
),
]
|
py | 1a4883bfa2c641a0ee48d991f4bbba2b1c3ddcb9 | from __future__ import absolute_import
import os
import re
import json
import base64
import inspect
import requests
import mimetypes
from contextlib import contextmanager
from datetime import datetime, timedelta
from django.conf import settings
from django.db import transaction
from pytz import utc
from random import randint
from six import StringIO
# Do not import from sentry here! Bad things will happen
optional_group_matcher = re.compile(r'\(\?\:([^\)]+)\)')
named_group_matcher = re.compile(r'\(\?P<(\w+)>[^\)]+\)')
non_named_group_matcher = re.compile(r'\([^\)]+\)')
# [foo|bar|baz]
either_option_matcher = re.compile(r'\[([^\]]+)\|([^\]]+)\]')
camel_re = re.compile(r'([A-Z]+)([a-z])')
API_PREFIX = '/api/0/'
scenarios = {}
def simplify_regex(pattern):
"""Clean up urlpattern regexes into something somewhat readable by
Mere Humans: turns something like
"^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$" into
"{sport_slug}/athletes/{athlete_slug}/"
"""
pattern = optional_group_matcher.sub(lambda m: '[%s]' % m.group(1), pattern)
# handle named groups first
pattern = named_group_matcher.sub(lambda m: '{%s}' % m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("{var}", pattern)
# handle optional params
pattern = either_option_matcher.sub(lambda m: m.group(1), pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '') \
.replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
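# Illustrative result of the substitutions above (note the leading slash that is
# prepended at the end):
#   simplify_regex(r"^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$")
#   -> "/{sport_slug}/athletes/{athlete_slug}/"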
def get_internal_endpoint_from_pattern(pattern):
from sentry.api.base import Endpoint
if not hasattr(pattern, 'callback'):
return
if hasattr(pattern.callback, 'cls'):
cls = pattern.callback.cls
if issubclass(cls, Endpoint):
return cls
elif hasattr(pattern.callback, 'cls_instance'):
inst = pattern.callback.cls_instance
if isinstance(inst, Endpoint):
return inst.__class__
def extract_documentation(func):
doc = inspect.getdoc(func)
if doc is not None:
return doc.decode('utf-8')
def get_endpoint_path(internal_endpoint):
return '%s.%s' % (internal_endpoint.__module__, internal_endpoint.__name__, )
def extract_title_and_text(doc):
title = None
iterable = iter((doc or u'').splitlines())
clean_end = False
for line in iterable:
line = line.strip()
if title is None:
if not line:
continue
title = line
elif line[0] * len(line) == line:
clean_end = True
break
else:
break
lines = []
if clean_end:
for line in iterable:
if line.strip():
lines.append(line)
break
lines.extend(iterable)
return title, lines
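# Sketch of the parsing above with a hypothetical endpoint docstring:
#   extract_title_and_text("List Projects\n=============\n\nReturn all projects.")
#   -> ("List Projects", ["Return all projects."])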
def camelcase_to_dashes(string):
def handler(match):
camel, regular = match.groups()
if len(camel) != 1:
camel = camel[:-1].lower() + '-' + camel[-1].lower()
else:
camel = camel.lower()
return '-' + camel + regular.lower()
return camel_re.sub(handler, string).lstrip('-')
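# For example (illustrative only): "GetOrganizationDetails" becomes
# "get-organization-details", which is how endpoint_name is derived below.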
def extract_endpoint_info(pattern, internal_endpoint):
path = simplify_regex(pattern.regex.pattern)
from sentry.constants import HTTP_METHODS
for method_name in HTTP_METHODS:
if method_name in ('HEAD', 'OPTIONS'):
continue
method = getattr(internal_endpoint, method_name.lower(), None)
if method is None:
continue
doc = extract_documentation(method)
if doc is None:
continue
section = getattr(internal_endpoint, 'doc_section', None)
if section is None:
continue
endpoint_name = method.__name__.title() + internal_endpoint.__name__
if endpoint_name.endswith('Endpoint'):
endpoint_name = endpoint_name[:-8]
endpoint_name = camelcase_to_dashes(endpoint_name)
title, text = extract_title_and_text(doc)
yield dict(
path=API_PREFIX + path.lstrip('/'),
method=method_name,
title=title,
text=text,
scenarios=getattr(method, 'api_scenarios', None) or [],
section=section.name.lower(),
internal_path='%s:%s' % (get_endpoint_path(internal_endpoint), method.__name__),
endpoint_name=endpoint_name,
)
def iter_endpoints():
from sentry.api.urls import urlpatterns
for pattern in urlpatterns:
internal_endpoint = get_internal_endpoint_from_pattern(pattern)
if internal_endpoint is None:
continue
for endpoint in extract_endpoint_info(pattern, internal_endpoint):
yield endpoint
def scenario(ident):
def decorator(f):
if ident in scenarios:
raise RuntimeError('Scenario duplicate: %s' % ident)
scenarios[ident] = f
f.api_scenario_ident = ident
return f
return decorator
def attach_scenarios(scenarios):
def decorator(f):
f.api_scenarios = [x.api_scenario_ident for x in scenarios]
return f
return decorator
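# Typical pairing of the two decorators above (names here are illustrative, not
# taken from the real endpoint modules): a scenario records example requests
# through the Runner, and attach_scenarios links it to the endpoint method that
# it documents.
#
#     @scenario('ListYourProjects')
#     def list_your_projects_scenario(runner):
#         runner.request(method='GET', path='/projects/')
#
#     class ProjectIndexEndpoint(Endpoint):
#         @attach_scenarios([list_your_projects_scenario])
#         def get(self, request):
#             ...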
def iter_scenarios():
# Make sure everything is imported.
for endpoint in iter_endpoints():
pass
return iter(sorted(scenarios.items()))
def get_sections():
from sentry.api.base import DocSection
return dict((x.name.lower(), x.value) for x in DocSection)
def create_sample_time_series(event):
from sentry.app import tsdb
group = event.group
now = datetime.utcnow().replace(tzinfo=utc)
for _ in range(60):
count = randint(1, 10)
tsdb.incr_multi(
((tsdb.models.project, group.project.id), (tsdb.models.group, group.id), ), now, count
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_received, group.project.organization_id),
(tsdb.models.project_total_received, group.project.id),
), now, int(count * 1.1)
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_rejected, group.project.organization_id),
(tsdb.models.project_total_rejected, group.project.id),
), now, int(count * 0.1)
)
now = now - timedelta(seconds=1)
for _ in range(24 * 30):
count = randint(100, 1000)
tsdb.incr_multi(
((tsdb.models.project, group.project.id), (tsdb.models.group, group.id), ), now, count
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_received, group.project.organization_id),
(tsdb.models.project_total_received, group.project.id),
), now, int(count * 1.1)
)
tsdb.incr_multi(
(
(tsdb.models.organization_total_rejected, group.project.organization_id),
(tsdb.models.project_total_rejected, group.project.id),
), now, int(count * 0.1)
)
now = now - timedelta(hours=1)
class MockUtils(object):
def create_user(self, mail):
from sentry.models import User
user, _ = User.objects.get_or_create(
username=mail, defaults={
'email': mail,
}
)
user.set_password('dummy')
user.save()
return user
def create_org(self, name, owner):
from sentry.models import Organization, OrganizationMember
org, _ = Organization.objects.get_or_create(
name=name,
)
dummy_member, _ = OrganizationMember.objects.get_or_create(
user=owner, organization=org, defaults={
'role': 'member',
}
)
return org
def create_api_key(self, org, label='Default'):
from sentry.models import ApiKey
return ApiKey.objects.get_or_create(
organization=org,
label=label,
scopes=(1 << len(ApiKey.scopes.keys())) - 1,
)[0]
def create_client_key(self, project, label='Default'):
from sentry.models import ProjectKey
return ProjectKey.objects.get_or_create(project=project, label=label)[0]
def create_team(self, name, org):
from sentry.models import Team
return Team.objects.get_or_create(
name=name,
defaults={
'organization': org,
},
)[0]
def create_project(self, name, teams, org):
from sentry.models import Project
project = Project.objects.get_or_create(
name=name, defaults={
'organization': org,
}
)[0]
for team in teams:
project.add_team(team)
return project
def create_release(self, project, user, version=None):
from sentry.models import Release, Activity
if version is None:
version = os.urandom(20).encode('hex')
with transaction.atomic():
release = Release.objects.filter(
version=version, organization_id=project.organization_id, projects=project
).first()
if not release:
release = Release.objects.filter(
version=version,
organization_id=project.organization_id,
).first()
if not release:
release = Release.objects.create(
version=version,
organization_id=project.organization_id,
)
release.add_project(project)
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=Activity.get_version_ident(version),
user=user,
data={'version': version},
)
return release
def create_release_file(self, project, release, path, content_type=None, contents=None):
from sentry.models import File, ReleaseFile
if content_type is None:
content_type = mimetypes.guess_type(path)[0] or 'text/plain'
if content_type.startswith('text/'):
content_type += '; encoding=utf-8'
f = File.objects.create(
name=path.rsplit('/', 1)[-1],
type='release.file',
headers={'Content-Type': content_type},
)
f.putfile(StringIO(contents or ''))
return ReleaseFile.objects.create(
organization_id=project.organization_id, release=release, file=f, name=path
)
def create_event(self, project, release, platform='python', raw=True):
from sentry.utils.samples import create_sample_event
event = create_sample_event(
project=project, platform=platform, release=release.version, raw=raw
)
create_sample_time_series(event)
return event
class Runner(object):
"""The runner is a special object that holds state for the automatic
running of example scenarios. It gets created by api-docs/generator.py
which does the majority of the heavy lifting. It mainly exists here
so that the scenarios can be run separately if needed.
"""
def __init__(self, ident, func, api_key, org, me, teams=None):
self.ident = ident
self.func = func
self.requests = []
self.utils = MockUtils()
self.api_key = api_key
self.org = org
self.me = me
self.teams = teams
@property
def default_team(self):
return self.teams[0]['team']
@property
def default_project(self):
return self.teams[0]['projects'][0]['project']
@property
def default_release(self):
return self.teams[0]['projects'][0]['release']
@property
def default_event(self):
return self.teams[0]['projects'][0]['events'][0]
@contextmanager
def isolated_project(self, project_name):
from sentry.models import Group, Event
project = self.utils.create_project(project_name, teams=[self.default_team], org=self.org)
release = self.utils.create_release(project=project, user=self.me)
self.utils.create_event(project=project, release=release, platform='python')
self.utils.create_event(project=project, release=release, platform='java')
try:
yield project
finally:
# Enforce safe cascades into Group/Event
Group.objects.filter(
project=project,
).delete()
Event.objects.filter(
project_id=project.id,
).delete()
project.delete()
@contextmanager
def isolated_org(self, org_name):
from sentry.models import Group, Event
org = self.utils.create_org(org_name, owner=self.me)
try:
yield org
finally:
# Enforce safe cascades into Group/Event
Group.objects.filter(
project__organization=org,
).delete()
Event.objects.filter(
project_id__in=org.project_set.values('id'),
).delete()
org.delete()
def request(self, method, path, headers=None, data=None, api_key=None, format='json'):
if api_key is None:
api_key = self.api_key
path = '/api/0/' + path.lstrip('/')
headers = dict(headers or {})
request_is_json = True
body = None
files = None
was_multipart = False
if data is not None:
if format == 'json':
body = json.dumps(data, sort_keys=True)
headers['Content-Type'] = 'application/json'
elif format == 'multipart':
files = {}
for key, value in data.items():
if hasattr(value, 'read') or isinstance(value, tuple):
files[key] = value
del data[key]
was_multipart = True
body = data
req_headers = dict(headers)
req_headers['Host'] = 'sentry.io'
req_headers['Authorization'] = \
'Basic %s' % base64.b64encode('%s:' % (api_key.key.encode('utf-8')))
url = 'http://127.0.0.1:%s%s' % (settings.SENTRY_APIDOCS_WEB_PORT, path, )
response = requests.request(
method=method, url=url, files=files, headers=req_headers, data=body
)
response_headers = dict(response.headers)
# Don't want those
response_headers.pop('server', None)
response_headers.pop('date', None)
if response.headers.get('Content-Type') == 'application/json':
response_data = response.json()
is_json = True
else:
response_data = response.text
is_json = False
if was_multipart:
headers['Content-Type'] = response.request.headers['content-type']
data = response.request.body
request_is_json = False
rv = {
'request': {
'method': method,
'path': path,
'headers': headers,
'data': data,
'is_json': request_is_json,
},
'response': {
'headers': response_headers,
'status': response.status_code,
'reason': response.reason,
'data': response_data,
'is_json': is_json,
}
}
self.requests.append(rv)
return rv
def to_json(self):
doc = extract_documentation(self.func)
title, text = extract_title_and_text(doc)
return {
'ident': self.ident,
'requests': self.requests,
'title': title,
'text': text,
}
|
py | 1a4883f6b290e2426a12a2ecfc40fc0e7aeb1333 | import setuptools
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="better-ffmpeg-progress",
version="2.0.0",
author="GitHub.com/CrypticSignal",
author_email="[email protected]",
description="Run FFmpeg & see percentage progress + ETA.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/CrypticSignal/better-ffmpeg-progress",
packages=["better_ffmpeg_progress"],
install_requires=["ffmpeg-python", "tqdm"],
python_requires=">=3.6",
keywords=["ffmpeg", "progress"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
) |
py | 1a488844dec2133d3490cb03a0f610052a14175c | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
from six.moves import reduce
from .. import core
from ..layers import utils
from ..layers import nn as F
from .. import dygraph_utils
from . import layers
from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator, default_main_program
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..param_attr import ParamAttr
from ..initializer import Normal, Constant, NumpyArrayInitializer
from .. import unique_name
from .layer_object_helper import LayerObjectHelper
from ..data_feeder import check_variable_and_dtype, check_type
import numpy as np
import numbers
import logging
import paddle.utils.deprecated as deprecated
__all__ = [
'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu',
'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm',
'SpectralNorm', 'TreeConv', 'Flatten'
]
class Conv2D(layers.Layer):
"""
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW format, where N is batch size, C is the number of
the feature map, H is the height of the feature map, and W is the width of the feature map.
Filter's shape is [MCHW] , where M is the number of output feature map,
C is the number of input feature map, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input feature map divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \\sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
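        For example, with :math:`H_{in} = W_{in} = 32`, ``filter_size=3``, ``stride=1``,
        ``padding=0`` and ``dilation=1``, the formulas above give
        :math:`H_{out} = W_{out} = (32 + 2*0 - 3) / 1 + 1 = 30`, consistent with the
        usage example below.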
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filter. It is as same as the output
feature map.
filter_size (int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
padding (int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups (int, optional): The groups number of the Conv2d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filter of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Raises:
ValueError: if ``use_cudnn`` is not a bool value.
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D
import numpy as np
data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
with fluid.dygraph.guard():
conv2d = Conv2D(3, 2, 3)
data = to_variable(data)
conv = conv2d(data)
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv2D, self).__init__()
self._num_channels = num_channels
self._groups = groups
self._stride = utils.convert_to_list(stride, 2, 'stride')
self._padding = utils.convert_to_list(padding, 2, 'padding')
self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
self._act = act
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_cudnn = use_cudnn
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if (self._num_channels == self._groups and
num_filters % self._num_channels == 0 and
not self._use_cudnn and not self._use_mkldnn):
self._l_type = 'depthwise_conv2d'
else:
self._l_type = 'conv2d'
self._num_channels = num_channels
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[
1] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
if in_dygraph_mode() and self._l_type == 'conv2d':
attrs = ('strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups
if self._groups else 1, 'use_cudnn', self._use_cudnn,
'use_mkldnn', self._use_mkldnn)
out = core.ops.conv2d(input, self.weight, *attrs)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
inputs = {
'Input': [input],
'Filter': [self.weight],
}
attrs = {
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': self._use_mkldnn,
}
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'Conv2D')
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={
'Input': input,
'Filter': self.weight,
},
outputs={"Output": pre_bias},
attrs=attrs)
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1,
'use_mkldnn': self._use_mkldnn})
else:
pre_act = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_act, act=self._act)
class Conv3D(layers.Layer):
"""
**Convlution3D Layer**
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are multidimensional tensors with a shape of
:math:`[N, C, D, H, W]` . Where N is batch size, C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convlution3D is similar with Convlution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of filter. It is as same as the output image channel.
filter_size (int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square, filter_size_depth = filter_size_height
= filter_size_width = filter_size.
stride (int|tuple, optional): The stride size. If stride is a tuple, it must
contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
stride_D = stride_H = stride_W = stride. The default value is 1.
padding (int|tuple, optional): The padding size. If padding is a tuple, it must
contain three integers, (padding_D, padding_H, padding_W). Otherwise, the
padding_D = padding_H = padding_W = padding. The default value is 0.
dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups (int, optional): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3d = fluid.dygraph.nn.Conv3D(
num_channels=3, num_filters=2, filter_size=3, act="relu")
ret = conv3d(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
assert param_attr is not False, "param_attr should not be False here."
super(Conv3D, self).__init__()
self._num_channels = num_channels
self._groups = groups
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._act = act
self._use_cudnn = use_cudnn
self._filter_size = filter_size
self._num_filters = num_filters
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if self._groups is None:
num_filter_channels = self._num_channels
else:
if self._num_channels % self._groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = self._num_channels // self._groups
filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size')
filter_shape = [self._num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
2] * self._num_channels
std = (2.0 / filter_elem_num)**0.5
return Normal(0.0, std, 0)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
default_initializer=_get_default_param_initializer())
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='conv3d',
inputs={
'Input': input,
'Filter': self.weight,
},
outputs={"Output": pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn,
'use_mkldnn': False
})
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class Conv3DTranspose(layers.Layer):
"""
**Convlution3D transpose layer**
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
           D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]
**Note**:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
          If output_size is None, :math:`D_{out} = D^\prime_{out}`, :math:`H_{out} = H^\prime_{out}`,
          :math:`W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
          size must be between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
          the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
          and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must be
          between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Parameters:
num_channels(int): The number of channels in the input image.
num_filters(int): The number of the filter. It is as same as the output
image channel.
filter_size(int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
padding(int|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
The default value is 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
The default value is 1.
dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must
contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
dilation_D = dilation_H = dilation_W = dilation. The default value is 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
The default value is 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default value is None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. The default value is None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. The default value is True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
The default value is None.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`.
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
None.
Raises:
ValueError: If the shapes of input, filter_size, stride, padding and
groups mismatch.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose(
num_channels=3,
num_filters=12,
filter_size=12,
use_cudnn=False)
ret = conv3dTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
super(Conv3DTranspose, self).__init__()
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
self._padding = utils.convert_to_list(padding, 3, 'padding')
self._stride = utils.convert_to_list(stride, 3, 'stride')
self._dilation = utils.convert_to_list(dilation, 3, 'dilation')
self._param_attr = param_attr
self._num_channels = num_channels
self._filter_size = filter_size
self._groups = 1 if groups is None else groups
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._bias_attr = bias_attr
self._act = act
self._dtype = dtype
self._filter_size = utils.convert_to_list(
self._filter_size, 3, 'conv3d_transpose.filter_size')
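        # Note: for a transposed convolution the filter layout is
        # [num_channels, num_filters // groups, D_f, H_f, W_f], i.e. the
        # input-channel dimension comes first.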
filter_shape = [self._num_channels, self._num_filters // self._groups
] + self._filter_size
self.weight = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="conv3d_transpose",
inputs={'Input': [input],
'Filter': [self.weight]},
outputs={'Output': pre_bias},
attrs={
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups if self._groups else 1,
'use_cudnn': self._use_cudnn
})
if self._bias_attr:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
# Currently, we don't support inplace in imperative mode
return self._helper.append_activation(pre_act, act=self._act)
class Pool2D(layers.Layer):
"""
:alias_main: paddle.nn.Pool2D
:alias: paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D
:old_api: paddle.fluid.dygraph.Pool2D
This interface is used to construct a callable object of the ``Pool2D`` class.
For more details, refer to code examples.
    The pooling2d operation calculates the output based on the input, pool_type and pool_size, pool_stride,
    pool_padding parameters. Input and output are in NCHW format, where N is batch size, C is the number of feature maps,
    H is the height of the feature map, and W is the width of the feature map.
    Parameters (ksize, strides, paddings) are two-element lists. These two elements represent height and width, respectively.
    The input(X) size and output(Out) size may be different.
Example:
- Input:
Input shape: :math:`(N, C, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C, H_{out}, W_{out})`
If ``ceil_mode`` = False:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
If ``ceil_mode`` = True:
.. math::
H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\
W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
If ``exclusive`` = False:
.. math::
hstart &= i * strides[0] - paddings[0] \\\\
hend &= hstart + ksize[0] \\\\
wstart &= j * strides[1] - paddings[1] \\\\
wend &= wstart + ksize[1] \\\\
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
If ``exclusive`` = True:
.. math::
hstart &= max(0, i * strides[0] - paddings[0])\\\\
hend &= min(H, hstart + ksize[0]) \\\\
wstart &= max(0, j * strides[1] - paddings[1]) \\\\
wend & = min(W, wstart + ksize[1]) \\\\
Output(i ,j) & = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
Parameters:
pool_size (int or list or tuple, optional): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
            Otherwise, the pool kernel size will be a square with side length ``pool_size``. Default: -1.
pool_type(str, optional) : The pooling type, can be "max" for max-pooling and "avg" for average-pooling.
Default: max.
pool_stride (int or list or tuple, optional): The pool stride size. If pool stride size is a tuple or list,
it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,
            the pool stride will be ``pool_stride`` in both dimensions. Default: 1.
pool_padding (int or list or tuple, optional): The padding size for pooling operation.
If ``pool_padding`` is a tuple,
it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).
            Otherwise, the padding will be ``pool_padding`` in both dimensions. Default: 0.
global_pooling (bool, optional): Whether to use the global pooling. If global_pooling = true,
kernel size and paddings will be ignored. Default: False.
        use_cudnn (bool, optional): Whether to use the cudnn kernel; it is only valid when the cudnn library is installed. Default: True.
        ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.
            If it is set to False, the floor function will be used. Default: False.
exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.
data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
``[batch_size, input_channels, input_height, input_width]``. When it is `"NHWC"`, the data is
stored in the order of: ``[batch_size, input_height, input_width, input_channels]``
Returns:
None
Raises:
ValueError: If ``pool_type`` is not "max" nor "avg".
ValueError: If ``global_pooling`` is False and ``pool_size`` is -1.
ValueError: If ``use_cudnn`` is not a bool value.
ValueError: If ``data_format`` is not "NCHW" nor "NHWC".
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
with fluid.dygraph.guard():
             data = np.random.random((3, 32, 32, 5)).astype('float32')
pool2d = fluid.dygraph.Pool2D(pool_size=2,
pool_type='max',
pool_stride=1,
global_pooling=False)
pool2d_res = pool2d(to_variable(data))
"""
def __init__(self,
pool_size=-1,
pool_type="max",
pool_stride=1,
pool_padding=0,
global_pooling=False,
use_cudnn=True,
ceil_mode=False,
exclusive=True,
data_format="NCHW"):
        data_format = data_format.upper()  # support NHWC, nhwc, etc.
        pool_type = pool_type.lower()  # support max, Max, etc.
if pool_type not in ["max", "avg"]:
            raise ValueError(
                "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
                str(pool_type))
if global_pooling is False and pool_size == -1:
raise ValueError(
"When the global_pooling is False, pool_size must be passed "
"and be a valid value. Received pool_size: " + str(pool_size))
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
super(Pool2D, self).__init__()
self._pool_type = pool_type
self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
self._pool_padding = utils.convert_to_list(pool_padding, 2,
'pool_padding')
self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')
self._global_pooling = global_pooling
self._use_cudnn = use_cudnn
self._ceil_mode = ceil_mode
self._exclusive = exclusive
self._data_format = data_format
self._l_type = 'pool2d'
def forward(self, input):
if in_dygraph_mode():
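            # In dygraph mode the attributes are passed to the C++ op as a flat
            # tuple of alternating (name, value) pairs instead of a dict.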
attrs = ('pooling_type', self._pool_type, 'ksize', self._pool_size,
'global_pooling', self._global_pooling, 'strides',
self._pool_stride, 'paddings', self._pool_padding,
'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
'use_mkldnn', self._use_mkldnn, 'exclusive',
self._exclusive, 'data_format', self._data_format)
return core.ops.pool2d(input, *attrs)
check_variable_and_dtype(
input, 'input', ['int8', 'uint8', 'float16', 'float32', 'float64'],
'Pool2D')
attrs = {
"pooling_type": self._pool_type,
"ksize": self._pool_size,
"global_pooling": self._global_pooling,
"strides": self._pool_stride,
"paddings": self._pool_padding,
"use_cudnn": self._use_cudnn,
"ceil_mode": self._ceil_mode,
"use_mkldnn": self._use_mkldnn,
"exclusive": self._exclusive,
"data_format": self._data_format,
}
inputs = {"X": [input]}
pool_out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type=self._l_type,
inputs={"X": input},
outputs={"Out": pool_out},
attrs=attrs)
return pool_out
class Linear(layers.Layer):
"""
Fully-connected linear transformation layer:
.. math::
Out = Act({XW + b})
where :math:`X` is the input Tensor, :math:`W` and :math:`b` are weight and bias respectively.
Linear layer takes only one ``Tensor`` input.
The Linear layer multiplies input tensor with weight matrix and
produces an output Tensor of shape [N, *, `output_dim`],
where N is batch size and `*` means any number of additional dimensions.
If ``bias_attr`` is not None, a bias variable will be created and added to the output.
Finally, if ``act`` is not None, it will be applied to the output as well.
Parameters:
input_dim(int): The number of input units in this layer.
output_dim(int): The number of output units in this layer.
param_attr(ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable
weights(Parameter) of this layer. Default: None.
bias_attr(ParamAttr or list of ParamAttr, optional): The attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act(str, optional): Activation to be applied to the output of this layer. Default: None.
dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32".
Attributes:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
res = linear(data) # [30, 10, 64]
"""
def __init__(self,
input_dim,
output_dim,
param_attr=None,
bias_attr=None,
act=None,
dtype="float32"):
super(Linear, self).__init__()
self._act = act
self._dtype = dtype
self.weight = self.create_parameter(
shape=[input_dim, output_dim],
attr=param_attr,
dtype=dtype,
is_bias=False)
self.bias = self.create_parameter(
shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
def forward(self, input):
if in_dygraph_mode():
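            # Dygraph fast path: call the C++ ops directly (matmul, then bias
            # add, then activation) instead of appending ops to a program.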
pre_bias = _varbase_creator(dtype=input.dtype)
core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
'transpose_Y', False, "alpha", 1, "use_mkldnn",
self._use_mkldnn)
pre_act = dygraph_utils._append_bias_in_dygraph(
pre_bias,
self.bias,
axis=len(input.shape) - 1,
use_mkldnn=self._use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
pre_act, self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], "Linear")
attrs = {
"transpose_X": False,
"transpose_Y": False,
"alpha": 1,
"use_mkldnn": self._use_mkldnn,
}
inputs = {"X": [input], "Y": [self.weight]}
tmp = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="matmul", inputs=inputs, outputs={"Out": tmp}, attrs=attrs)
if self.bias is not None:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [tmp],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={
'axis': len(input.shape) - 1,
'use_mkldnn': self._use_mkldnn
})
else:
pre_activation = tmp
return self._helper.append_activation(pre_activation, act=self._act)
class InstanceNorm(layers.Layer):
"""
This interface is used to construct a callable object of the ``InstanceNorm`` class.
For more details, refer to code examples.
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
\\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Parameters:
num_channels(int): Indicate the number of channels of the input ``Tensor``.
epsilon(float, optional): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
one. If it is set to False, will not create param_attr. Default: None.
bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
If it is set to False, will not create bias_attr. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
import paddle
# x's shape is [1, 3, 1, 2]
x = np.array([[[[1.0, 8.0]], [[10.0, 5.0]], [[4.0, 6.0]]]]).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
instanceNorm = paddle.nn.InstanceNorm(3)
ret = instanceNorm(x)
# ret's shape is [1, 3, 1, 2]; value is [-1 1 0.999999 -0.999999 -0.999995 0.999995]
print(ret)
"""
def __init__(self,
num_channels,
epsilon=1e-5,
param_attr=None,
bias_attr=None,
dtype='float32'):
super(InstanceNorm, self).__init__()
        if param_attr == False or bias_attr == False:
            assert bias_attr == param_attr, "param_attr and bias_attr must be set to False at the same time in InstanceNorm"
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._dtype = dtype
if param_attr != False and bias_attr != False:
self.scale = self.create_parameter(
attr=self._param_attr,
shape=[num_channels],
dtype=self._dtype,
default_initializer=Constant(1.0),
is_bias=False)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[num_channels],
dtype=self._dtype,
default_initializer=Constant(0.0),
is_bias=True)
else:
self.scale = None
self.bias = None
def forward(self, input):
if in_dygraph_mode():
out, _, _ = core.ops.instance_norm(input, self.scale, self.bias,
'epsilon', self._epsilon)
return out
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
"InstanceNorm")
attrs = {"epsilon": self._epsilon}
if self.scale and self.bias:
inputs = {"X": [input], "Scale": [self.scale], "Bias": [self.bias]}
else:
inputs = {"X": [input]}
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
instance_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
outputs = {
"Y": [instance_norm_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
self._helper.append_op(
type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return instance_norm_out
class BatchNorm(layers.Layer):
"""
:alias_main: paddle.nn.BatchNorm
:alias: paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm
:old_api: paddle.fluid.dygraph.BatchNorm
This interface is used to construct a callable object of the ``BatchNorm`` class.
For more details, refer to code examples.
It implements the function of the Batch Normalization Layer and can be used
as a normalizer function for conv2d and fully connected operations.
The data is normalized by the mean and variance of the channel based on the current batch data.
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
When use_global_stats = False, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are the statistics of one mini-batch.
Calculated as follows:
.. math::
\\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
\ mini-batch\ mean \\\\
\\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
\\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
- :math:`x` : mini-batch data
- :math:`m` : the size of the mini-batch data
When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
    They are global or running statistics (moving_mean and moving_variance), usually obtained from a
    pre-trained model. Calculated as follows:
.. math::
moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\
moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\
The normalization function formula is as follows:
.. math::
\\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
\\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
    - :math:`\\epsilon` : a small value added to the variance to prevent division by zero
    - :math:`\\gamma` : trainable scale parameter
    - :math:`\\beta` : trainable shift (bias) parameter
Parameters:
num_channels(int): Indicate the number of channels of the input ``Tensor``.
act(str, optional): Activation to be applied to the output of batch normalization. Default: None.
        is_test (bool, optional): A flag indicating whether it is in test phase or not.
This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``.
Default: False.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
dtype(str, optional): Indicate the data type of the input ``Tensor``,
which can be float32 or float64. Default: float32.
data_layout(str, optional): Specify the input data format, the data format can be "NCHW" or "NHWC". Default: NCHW.
in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.
moving_mean_name(str, optional): The name of moving_mean which store the global Mean. Default: None.
moving_variance_name(str, optional): The name of the moving_variance which store the global Variance. Default: None.
do_model_average_for_mean_and_var(bool, optional): Whether parameter mean and variance should do model
average when model average is enabled. Default: True.
        use_global_stats(bool, optional): Whether to use global mean and
            variance. In inference or test mode, setting use_global_stats to True
            or is_test to True has the same effect.
            In train mode, when setting use_global_stats True, the global mean
            and variance are also used during training. Default: False.
trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when
setting trainable_statistics True, mean and variance will be calculated by current batch statistics.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
batch_norm = fluid.BatchNorm(10)
hidden1 = batch_norm(x)
"""
def __init__(self,
num_channels,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
dtype='float32',
data_layout='NCHW',
in_place=False,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
trainable_statistics=False):
super(BatchNorm, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
assert bias_attr is not False, "bias_attr should not be False in batch_norm."
if dtype == "float16":
self._dtype = "float32"
else:
self._dtype = dtype
param_shape = [num_channels]
# create parameter
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
self.weight.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
self.bias.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.
self._mean = self.create_parameter(
attr=ParamAttr(
name=moving_mean_name,
initializer=Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._mean.stop_gradient = True
self._variance = self.create_parameter(
attr=ParamAttr(
name=moving_variance_name,
initializer=Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var),
shape=param_shape,
dtype=self._dtype)
self._variance.stop_gradient = True
self._in_place = in_place
self._data_layout = data_layout
self._momentum = momentum
self._epsilon = epsilon
self._is_test = is_test
self._fuse_with_relu = False
self._use_global_stats = use_global_stats
self._trainable_statistics = trainable_statistics
def forward(self, input):
# create output
# mean and mean_out share the same memory
mean_out = self._mean
# variance and variance out share the same memory
variance_out = self._variance
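        # Because MeanOut/VarianceOut alias the running statistics above, the
        # op updates the moving mean and variance in place using the momentum.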
if in_dygraph_mode():
attrs = ("momentum", self._momentum, "epsilon", self._epsilon,
"is_test", not self.training, "data_layout",
self._data_layout, "use_mkldnn", self._use_mkldnn,
"fuse_with_relu", self._fuse_with_relu, "use_global_stats",
self._use_global_stats, 'trainable_statistics',
self._trainable_statistics)
batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
input, self.weight, self.bias, self._mean, self._variance,
mean_out, variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'], 'BatchNorm')
attrs = {
"momentum": self._momentum,
"epsilon": self._epsilon,
"is_test": self._is_test,
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": self._fuse_with_relu,
"use_global_stats": self._use_global_stats,
"trainable_statistics": self._trainable_statistics,
}
inputs = {
"X": [input],
"Scale": [self.weight],
"Bias": [self.bias],
"Mean": [self._mean],
"Variance": [self._variance]
}
saved_mean = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(
self._dtype)
outputs = {
"Y": [batch_norm_out],
"MeanOut": [mean_out],
"VarianceOut": [variance_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
self._helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)
class Dropout(layers.Layer):
"""
This interface is used to construct a callable object of the ``Dropout`` class.
For more details, refer to code examples.
Drop or keep each element of input independently. Dropout is a regularization
    technique for reducing overfitting by preventing neuron co-adaptation during
    training. The dropout operator randomly sets (according to the given dropout
    probability) the outputs of some units to zero, while others remain
    unchanged.
    At inference time, the dropout layer can be removed for efficiency.
Parameters:
p (float, optional): Probability of setting units to zero. Default: 0.5
seed (int, optional): A Python integer used to create random seeds. If this
parameter is set to None, a random seed is used.
NOTE: If an integer seed is given, always the same output
units will be dropped. DO NOT use a fixed seed in training. Default: None.
dropout_implementation(string, optional): ['downgrade_in_infer'(default)|'upscale_in_train']
1. downgrade_in_infer(default), downgrade the outcome at inference
- train: out = input * mask
- inference: out = input * (1.0 - p)
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is dropout_prob)
2. upscale_in_train, upscale the outcome at training time
- train: out = input * mask / ( 1.0 - p )
- inference: out = input
(mask is a tensor same shape with input, value is 0 or 1
ratio of 0 is p)
        is_test (bool, optional): A flag indicating whether it is in test phase or not.
This flag only has effect on static graph mode. For dygraph mode, please use ``eval()``.
Default: False.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
m = fluid.dygraph.Dropout(p=0.5)
droped_train = m(x)
# switch to eval mode
m.eval()
droped_eval = m(x)
"""
def __init__(self,
p=0.5,
seed=None,
dropout_implementation="downgrade_in_infer",
is_test=False):
super(Dropout, self).__init__()
        assert isinstance(p, (float, int)), "p argument should be a number"
        assert 0 <= p <= 1, "p argument should be between 0 and 1"
        self._dropout_prob = p
        assert seed is None or isinstance(
            seed, int), "seed argument should be None or an integer"
self._seed = seed
assert dropout_implementation in (
'downgrade_in_infer', 'upscale_in_train'
), "dropout_implementation argument should be 'downgrade_in_infer' or 'upscale_in_train'"
self._dropout_implementation = dropout_implementation
self._is_test = is_test
def forward(self, input):
prog = default_main_program()
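        # Fall back to the program-level random seed when no explicit
        # (non-zero) seed was given, so results stay reproducible.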
if (self._seed is None or self._seed == 0) and prog.random_seed != 0:
self._seed = prog.random_seed
attrs = {
'dropout_prob': self._dropout_prob,
'is_test': not self.training
if in_dygraph_mode() else self._is_test,
'fix_seed': self._seed is not None,
'seed': self._seed if self._seed is not None else 0,
'dropout_implementation': self._dropout_implementation,
}
if in_dygraph_mode():
attrs = sum(attrs.items(), ())
out, mask = core.ops.dropout(input, *attrs)
return out
out = self._helper.create_variable_for_type_inference(dtype=input.dtype)
mask = self._helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
self._helper.append_op(
type='dropout',
inputs={'X': [input]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
class Embedding(layers.Layer):
"""
:alias_main: paddle.nn.Embedding
:alias: paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding
:old_api: paddle.fluid.dygraph.Embedding
**Embedding Layer**
This interface is used to construct a callable object of the ``Embedding`` class.
For specific usage, refer to code examples. It implements the function of the Embedding Layer.
    This layer is used to look up the embedding vectors of the ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.
    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
            input.data = [[1, 3], [2, 4], [4, 127]]
input.shape = [3, 2]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
It will pad all-zero data when ids is 127.
Parameters:
size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size
of the dictionary of embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
            True because sparse update is faster. But some optimizers do not support sparse update,
such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
:ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
:ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
in multi-machine distributed CPU training. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
If set None, it makes no effect to output. Default: None.
param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
The local word vector needs to be transformed into numpy format, and the shape of local word
vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
is used to load custom or pre-trained word vectors. See code example 2 for details.
dtype(np.dtype|core.VarDesc.VarType|str): It refers to the data type of output Tensor.
It must be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy as np
# example 1
inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64')
inp_word.shape # [2, 3]
dict_size = 20
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
static_rlt3.shape # [2, 3, 32]
# example 2: load custom or pre-trained word vectors
weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format
w_param_attrs = fluid.ParamAttr(
name="emb_weight",
learning_rate=0.5,
initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
trainable=True)
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding(
size=[128, 100],
param_attr= w_param_attrs,
is_sparse=False)
static_rlt3 = emb(base.to_variable(inp_word))
"""
def __init__(self,
size,
is_sparse=False,
is_distributed=False,
padding_idx=None,
param_attr=None,
dtype='float32'):
super(Embedding, self).__init__()
self._size = size
self._is_sparse = is_sparse
self._is_distributed = is_distributed
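        # Normalize padding_idx: None becomes -1 (no padding), and a negative
        # index is converted to size[0] + padding_idx.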
self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
size[0] + padding_idx)
self._param_attr = param_attr
self._dtype = dtype
self._remote_prefetch = self._is_sparse and (not self._is_distributed)
if self._remote_prefetch:
assert self._is_sparse is True and self._is_distributed is False
self.weight = self.create_parameter(
attr=self._param_attr,
shape=self._size,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
if in_dygraph_mode():
return core.ops.lookup_table_v2(
self.weight, input, 'is_sparse', self._is_sparse,
'is_distributed', self._is_distributed, 'remote_prefetch',
self._remote_prefetch, 'padding_idx', self._padding_idx)
check_variable_and_dtype(input, 'input', ['int64'], 'Embedding')
attrs = {
'is_sparse': self._is_sparse,
'is_distributed': self._is_distributed,
'remote_prefetch': self._remote_prefetch,
'padding_idx': self._padding_idx
}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='lookup_table_v2',
inputs={'Ids': input,
'W': self.weight},
outputs={'Out': out},
attrs=attrs)
return out
class LayerNorm(layers.Layer):
"""
:alias_main: paddle.nn.LayerNorm
:alias: paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm
:old_api: paddle.fluid.dygraph.LayerNorm
This interface is used to construct a callable object of the ``LayerNorm`` class.
For more details, refer to code examples.
It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
The formula is as follows:
.. math::
\\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i
\\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}
y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)
- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.
Parameters:
normalized_shape(int or list or tuple): Input shape from an expected input of
size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
If it is a single integer, this module will normalize over the last dimension
which is expected to be of that specific size.
scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
normalization. Default: True.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
a default :code:`ParamAttr` would be added as scale. The
:attr:`param_attr` is initialized as 1 if it is added. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
            omitted. If :attr:`shift` is True and :attr:`bias_attr` is None,
a default :code:`ParamAttr` would be added as bias. The
:attr:`bias_attr` is initialized as 0 if it is added. Default: None.
act(str, optional): Activation to be applied to the output of layer normalization.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy
x = numpy.random.random((3, 32, 32)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
layerNorm = fluid.LayerNorm([32, 32])
ret = layerNorm(x)
"""
def __init__(self,
normalized_shape,
scale=True,
shift=True,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
dtype='float32'):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = [normalized_shape]
self._normalized_shape = list(normalized_shape)
self._scale = scale
self._shift = shift
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._dtype = dtype
param_shape = [np.prod(self._normalized_shape)]
if self._scale:
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
else:
if self._param_attr:
logging.warn("param_attr are only available with scale is True")
self.weight = None
if self._shift:
assert self._bias_attr is not False
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
else:
if self._bias_attr:
logging.warn("bias_attr are only available with shift is True")
self.bias = None
def forward(self, input):
input_shape = list(input.shape)
input_ndim = len(input_shape)
normalized_ndim = len(self._normalized_shape)
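        # Normalization is applied over the trailing ``normalized_ndim``
        # dimensions; everything before begin_norm_axis is treated as batch.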
self._begin_norm_axis = input_ndim - normalized_ndim
if input_ndim < normalized_ndim or input_shape[
self._begin_norm_axis:] != self._normalized_shape:
str_normalized_shape = str(self._normalized_shape)
raise ValueError(
'Given normalized_shape is ' + str_normalized_shape +
', expected input with shape [*, ' + str_normalized_shape[
1:] + ', but got input shape ' + str(input_shape))
if in_dygraph_mode():
pre_act, _, _ = core.ops.layer_norm(
input, self.weight, self.bias, 'epsilon', self._epsilon,
'begin_norm_axis', self._begin_norm_axis)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'LayerNorm')
inputs = dict()
inputs['X'] = [input]
if self._scale:
inputs['Scale'] = [self.weight]
if self._shift:
inputs['Bias'] = [self.bias]
attrs = {
"epsilon": self._epsilon,
"begin_norm_axis": self._begin_norm_axis
}
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
layer_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
            attrs=attrs)
return self._helper.append_activation(layer_norm_out, act=self._act)
class GRUUnit(layers.Layer):
"""
**GRU unit layer**
It creates a callable object from GRUUnit class.
If origin_mode is True, then the equation of a gru step is from paper
`Learning Phrase Representations using RNN Encoder-Decoder for Statistical
Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
If origin_mode is False, then the equation of a gru step is from paper
`Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_
.. math::
u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)
r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)
m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)
h_t & = dot((1-u_t), h_{t-1}) + dot(u_t, m_t)
    The inputs of the gru unit include :math:`z_t` and :math:`h_{t-1}`. In terms
of the equation above, the :math:`z_t` is split into 3 parts -
:math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to
implement a full GRU unit operator for an input, a fully
connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.
The terms :math:`u_t` and :math:`r_t` represent the update and reset gates
    of the GRU cell. Unlike LSTM, GRU has one fewer gate. However, there is
an intermediate candidate hidden output, which is denoted by :math:`m_t`.
This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`
and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.
Parameters:
size (int): The input dimension value.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
hidden-hidden weight matrix.
**Note**:
                1. The shape of the weight matrix is :math:`[D, 3*D]`, where D is the hidden size.
2. All elements in the weight matrix can be divided into two parts. The first
part are weights of the update gate and reset gate with shape :math:`[D, 2*D]`,
and the second part are weights for candidate hidden state with shape :math:`[D, D]`.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. The default
value is None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias
            of GRU. Note that the bias with :math:`[1, 3*D]` concatenates
            the bias in the update gate, reset gate and candidate calculations.
If it is set to False, no bias will be applied to the update gate,
reset gate and candidate calculations. If it is set to None or one
attribute of ParamAttr, gru_unit will create ParamAttr as
bias_attr. If the Initializer of the bias_attr is not set, the bias
is initialized zero. The default value is None.
activation (str): The activation type for cell (actNode).
The default value is 'tanh'.
gate_activation (str): The activation type for gates (actGate).
The default value is 'sigmoid'.
dtype(str): The dtype of the layers. The data type can be set as
'float32', 'float64'. The default value is 'float32'.
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
tuple: The hidden value, reset-hidden value and gate values. The hidden value
is a 2-D tensor with shape :math:`[T, D]` . The reset-hidden value is a
2-D tensor with shape :math:`[T, D]` . The gate value is a 2-D tensor with
shape :math:`[T, 3*D]`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph.base as base
import numpy
lod = [[2, 4, 3]]
D = 5
T = sum(lod[0])
input = numpy.random.rand(T, 3 * D).astype('float32')
hidden_input = numpy.random.rand(T, D).astype('float32')
with fluid.dygraph.guard():
x = numpy.random.random((3, 32, 32)).astype('float32')
gru = fluid.dygraph.GRUUnit(size=D * 3)
dy_ret = gru(
base.to_variable(input), base.to_variable(hidden_input))
"""
def __init__(self,
size,
param_attr=None,
bias_attr=None,
activation='tanh',
gate_activation='sigmoid',
origin_mode=False,
dtype='float32'):
super(GRUUnit, self).__init__()
self._bias_attr = bias_attr
activation_dict = dict(
identity=0,
sigmoid=1,
tanh=2,
relu=3, )
self.activation = activation_dict[activation]
self.gate_activation = activation_dict[gate_activation]
self._dtype = dtype
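        # The ``size`` argument is 3 * D (update gate, reset gate and candidate
        # concatenated); recover the hidden size D for the parameter shapes.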
size = size // 3
# create weight
self.weight = self.create_parameter(
attr=param_attr, shape=[size, 3 * size], dtype=dtype)
# create bias
bias_size = [1, 3 * size]
self._bias_size = bias_size
self.bias = self.create_parameter(
attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
def forward(self, input, hidden):
if in_dygraph_mode():
gate, reset_hidden_pre, updated_hidden = core.ops.gru_unit(
input, hidden, self.weight, self.bias, 'activation',
self.activation, 'gate_activation', self.gate_activation)
return updated_hidden, reset_hidden_pre, gate
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'GRUUnit')
check_variable_and_dtype(hidden, 'hidden', ['float32', 'float64'],
'GRUUnit')
inputs = {
'Input': [input],
'HiddenPrev': [hidden],
'Weight': [self.weight]
}
if self.bias is not None:
inputs['Bias'] = [self.bias]
gate = self._helper.create_variable_for_type_inference(self._dtype)
reset_hidden_pre = self._helper.create_variable_for_type_inference(
self._dtype)
updated_hidden = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type='gru_unit',
inputs=inputs,
outputs={
'Gate': gate,
'ResetHiddenPrev': reset_hidden_pre,
'Hidden': updated_hidden,
},
attrs={
'activation': self.activation,
'gate_activation': self.gate_activation,
})
return updated_hidden, reset_hidden_pre, gate
class NCE(layers.Layer):
"""
This interface is used to construct a callable object of the ``NCE`` class.
For more details, refer to code examples.
It implements the function of the ``NCE`` loss function.
    By default this function uses a uniform distribution for sampling, and it
    computes and returns the noise-contrastive estimation training loss. See
`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_ .
Parameters:
num_total_classes (int): Total number of classes in all samples.
dim (int): Dimension of input (possibly embedding dim).
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of nce. If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, nce
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
num_neg_samples (int, optional): The number of negative classes. The default value is 10.
sampler (str, optional): The sampler used to sample class from negative classes.
It can be 'uniform', 'log_uniform' or 'custom_dist'.
default: 'uniform'.
custom_dist (float[], optional): A float[] with size=num_total_classes.
It is used when sampler is set to 'custom_dist'.
custom_dist[i] is the probability of i-th class to be sampled.
Default: None.
seed (int, optional): The seed used in sampler. Default: 0.
is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
window_size = 5
dict_size = 20
label_word = int(window_size // 2) + 1
inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')
nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')
with fluid.dygraph.guard():
words = []
for i in range(window_size):
words.append(fluid.dygraph.base.to_variable(inp_word[i]))
emb = fluid.Embedding(
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
embs3 = []
for i in range(window_size):
if i == label_word:
continue
emb_rlt = emb(words[i])
embs3.append(emb_rlt)
embs3 = fluid.layers.concat(input=embs3, axis=1)
nce = fluid.NCE(
num_total_classes=dict_size,
dim=embs3.shape[1],
num_neg_samples=2,
sampler="custom_dist",
custom_dist=nid_freq_arr.tolist(),
seed=1,
param_attr='nce.w',
bias_attr='nce.b')
wl = fluid.layers.unsqueeze(words[label_word], axes=[0])
nce_loss3 = nce(embs3, wl)
"""
def __init__(self,
num_total_classes,
dim,
sample_weight=None,
param_attr=None,
bias_attr=None,
num_neg_samples=None,
sampler="uniform",
custom_dist=None,
seed=0,
is_sparse=False,
dtype='float32'):
super(NCE, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._num_total_classes = num_total_classes
self._dtype = dtype
self._inputs = dict()
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
if sampler == "uniform":
sampler = 0
elif sampler == "log_uniform":
sampler = 1
elif sampler == "custom_dist":
assert custom_dist is not None
# assert isinstance(custom_dist, Variable)
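            # Build the alias-method (Walker/Vose) tables for the custom
            # sampler: classes with scaled probability > 1 ("bigs") donate
            # probability mass to classes < 1 ("littles"); alias_[i] stores the
            # donor class and alias_probs_[i] the acceptance probability.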
custom_dist_len = len(custom_dist)
alias_probs_ = [0] * custom_dist_len
alias_ = [0] * custom_dist_len
bigs = []
littles = []
for i in range(custom_dist_len):
normal_prob = custom_dist[i] * custom_dist_len
if normal_prob - 1.0 > 0:
bigs.append((i, normal_prob))
elif 1.0 - normal_prob > 0:
littles.append((i, normal_prob))
else:
alias_probs_[i] = normal_prob
alias_[i] = -1
while len(bigs) and len(littles):
big = bigs.pop(0)
little = littles.pop(0)
big_idx = big[0]
big_prob = big[1]
alias_probs_[little[0]] = little[1]
alias_[little[0]] = big_idx
big_left = big[1] + little[1] - 1
if big_left - 1.0 > 0:
bigs.append((big_idx, big_left))
elif 1.0 - big_left > 0:
littles.append((big_idx, big_left))
else:
alias_probs_[big_idx] = big_left
alias_[big_idx] = -1
if len(bigs):
big = bigs.pop(0)
alias_probs_[big[0]] = 1.0
alias_[big[0]] = -1
if len(littles):
little = littles.pop(0)
alias_probs_[little[0]] = 1.0
alias_[little[0]] = -1
def _init_by_numpy_array(numpy_array):
ret = self.create_parameter(
attr=ParamAttr(),
shape=numpy_array.shape,
dtype=numpy_array.dtype,
default_initializer=NumpyArrayInitializer(numpy_array))
ret.stop_gradient = True
return ret
self._inputs['CustomDistProbs'] = _init_by_numpy_array(
np.array(custom_dist).astype('float32'))
self._inputs['CustomDistAlias'] = _init_by_numpy_array(
np.array(alias_).astype('int32'))
self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array(
np.array(alias_probs_).astype('float32'))
sampler = 2
else:
raise Exception("Unsupported sampler type.")
if num_neg_samples is None:
num_neg_samples = 10
else:
num_neg_samples = int(num_neg_samples)
self._num_neg_samples = num_neg_samples
remote_prefetch = is_sparse
        print(
            "In sparse mode, if your model has only small parameters, prefetch may cause a slowdown."
        )
self._attrs = {
'num_total_classes': int(num_total_classes),
'num_neg_samples': num_neg_samples,
'seed': seed,
'sampler': sampler,
'is_sparse': is_sparse,
'remote_prefetch': remote_prefetch
}
self.weight = self.create_parameter(
attr=self._param_attr,
shape=[self._num_total_classes, dim],
is_bias=False,
dtype=self._dtype)
if self._bias_attr:
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_total_classes, 1],
is_bias=True,
dtype=self._dtype)
self._inputs['Bias'] = self.bias
self._inputs['Weight'] = self.weight
def forward(self, input, label, sample_weight=None):
check_variable_and_dtype(input, "input", ['float32', 'float64'], "NCE")
check_variable_and_dtype(label, "label", ['int64'], "NCE")
check_type(sample_weight, 'sample_weight', (Variable, type(None)),
'NCE')
assert isinstance(input, Variable)
assert isinstance(label, Variable)
self._inputs['Input'] = input
self._inputs['Label'] = label
self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []
cost = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_logits = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
sample_labels = self._helper.create_variable_for_type_inference(
dtype=label.dtype)
self._helper.append_op(
type='nce',
inputs=self._inputs,
outputs={
'Cost': cost,
'SampleLogits': sample_logits,
'SampleLabels': sample_labels
},
attrs=self._attrs)
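        # Normalize the per-example cost by the number of sampled classes
        # (one positive plus num_neg_samples negatives).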
return cost / (self._num_neg_samples + 1)
class PRelu(layers.Layer):
"""
This interface is used to construct a callable object of the ``PRelu`` class.
For more details, refer to code examples.
It implements three activation methods of the ``PRelu`` activation function.
Equation:
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
Parameters:
        mode (str): The mode for weight sharing. It supports all, channel
            and element. all: all elements share the same weight;
            channel: elements in a channel share the same weight;
            element: each element has its own weight.
channel (int, optional): The number of channels.
This argument is required when mode is "channel".
Default: None.
input_shape (list or tuple, optional): The shape of input.
This argument is required when mode is "element".
Default: None.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
weight (alpha). Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
import numpy as np
inp_np = np.ones([5, 200, 100, 100]).astype('float32')
with fluid.dygraph.guard():
inp_np = to_variable(inp_np)
prelu0 = fluid.PRelu(
mode='all',
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt0 = prelu0(inp_np)
prelu1 = fluid.PRelu(
mode='channel',
channel=200,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt1 = prelu1(inp_np)
prelu2 = fluid.PRelu(
mode='element',
input_shape=inp_np.shape,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))
dy_rlt2 = prelu2(inp_np)
"""
def __init__(self,
mode,
channel=None,
input_shape=None,
param_attr=None,
dtype='float32'):
# need specify name_scope since snake-cased 'PRelu' is 'p_relu'
super(PRelu, self).__init__(name_scope='prelu')
self._mode = mode
self._param_attr = param_attr
self._dtype = dtype
if mode == 'all':
self._alpha_shape = [1]
elif mode == 'channel':
assert isinstance(
channel,
int), "channel argument is required when mode is 'channel'."
#NOTE(zhiqiu): The _alpha_shape should be [1, channel] + [1] * len(input_shape[2:]), not [1, channel, 1, 1].
            # However, the suffix 1 in the list is useless, since the tensor is viewed as a one-dimensional array during kernel calculation.
# And, input_shape is not required when mode is 'channel', so it is simplified.
#NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
self._alpha_shape = [1, channel, 1, 1]
elif mode == 'element':
assert isinstance(input_shape, (
list, tuple
)), "input_shape argument is required when mode is 'element'."
self._alpha_shape = [1] + list(input_shape)[1:]
else:
raise ValueError('mode should be one of all, channel, element.')
self.weight = self.create_parameter(
attr=self._param_attr,
shape=self._alpha_shape,
dtype='float32',
is_bias=False,
default_initializer=Constant(1.0))
def forward(self, input):
check_variable_and_dtype(input, 'input', ['float32'], 'PRelu')
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="prelu",
inputs={"X": input,
'Alpha': self.weight},
attrs={"mode": self._mode},
outputs={"Out": out})
return out
class BilinearTensorProduct(layers.Layer):
"""
:alias_main: paddle.nn.BilinearTensorProduct
:alias: paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct
:old_api: paddle.fluid.dygraph.BilinearTensorProduct
**Add Bilinear Tensor Product Layer**
This layer performs bilinear tensor product on two inputs.
For example:
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N]
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Parameters:
input1_dim (int): The dimension of each first input.
input2_dim (int): The dimension of each second input.
output_dim (int): The dimension of output of this layer.
name (str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
act (str, optional): Activation to be applied to the output of this layer. The default value is None.
param_attr (ParamAttr, optional): The parameter attribute for the learnable w, parameters/weights of
this layer. The default value is None.
bias_attr (ParamAttr, optional): The parameter attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. The default value is None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of this layer.
**bias** (Parameter): the learnable bias of this layer.
Returns:
Variable: A 2-D Tensor of shape [batch_size, size].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
layer1 = numpy.random.random((5, 5)).astype('float32')
layer2 = numpy.random.random((5, 4)).astype('float32')
bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(
input1_dim=5, input2_dim=4, output_dim=1000)
ret = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1),
fluid.dygraph.base.to_variable(layer2))
"""
def __init__(self,
input1_dim,
input2_dim,
output_dim,
name=None,
act=None,
param_attr=None,
bias_attr=None,
dtype='float32'):
super(BilinearTensorProduct, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._name = name
self._input1_dim = input1_dim
self._input2_dim = input2_dim
self._output_dim = output_dim
self._inputs = dict()
self._dtype = dtype
param_shape = [self._output_dim, self._input1_dim, self._input2_dim]
self.weight = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=False)
bias_size = [1, self._output_dim]
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=bias_size,
dtype=self._dtype,
is_bias=True)
@deprecated(
since="2.0.0",
update_to="paddle.nn.Bilinear",
reason="New name and new args in Bilinear, easier to use.")
def forward(self, x, y):
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'BilinearTensorProduct')
check_variable_and_dtype(y, 'y', ['float32', 'float64'],
'BilinearTensorProduct')
self._inputs = {"X": x, "Y": y, "Weight": self.weight}
if self.bias is not None:
self._inputs["Bias"] = self.bias
if self._name is not None:
out = self._helper.create_variable(
name=".".join([self.full_name(), self._name]),
dtype=self._dtype,
persistable=False)
else:
out = self._helper.create_variable(
dtype=self._dtype, persistable=False)
self._helper.append_op(
type="bilinear_tensor_product",
inputs=self._inputs,
outputs={"Out": out})
# add activation
return self._helper.append_activation(out, act=self._act)
class Conv2DTranspose(layers.Layer):
"""
This interface is used to construct a callable object of the ``Conv2DTranspose`` class.
For more details, refer to code examples.
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input and output
    are in NCHW format, where N is the batch size, C is the number of feature maps,
    H is the height of the feature map, and W is the width of the feature map.
    The filter's shape is [MCHW], where M is the number of input feature maps,
    C is the number of output feature maps, H is the height of the filter,
    and W is the width of the filter. If groups is greater than 1,
    C will equal the number of input feature maps divided by groups.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
    For details of the convolution transpose layer, please refer to the following explanation and references
`conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
Parameters:
num_channels(int): The number of channels in the input image.
        num_filters(int): The number of filters. It is the same as the number of
            channels of the output feature map.
filter_size(int or tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
output_size(int or tuple, optional): The output image size. If output size is a
            tuple, it must contain two integers, (image_H, image_W). If None,
            output_size is calculated from filter_size, padding, and stride.
            If output_size and filter_size are specified at the same time, they
            should follow the formula above. Default: None.
padding(int or tuple, optional): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
stride(int or tuple, optional): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
data = np.random.random((3, 32, 32, 5)).astype('float32')
conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(
num_channels=32, num_filters=2, filter_size=3)
ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))
"""
def __init__(self,
num_channels,
num_filters,
filter_size,
output_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
dtype='float32'):
super(Conv2DTranspose, self).__init__()
assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self._groups = groups
self._num_channels = num_channels
self._num_filters = num_filters
self._use_cudnn = use_cudnn
self._padding = padding
self._stride = stride
self._dilation = dilation
self._filter_size = filter_size
self._output_size = output_size
self._dtype = dtype
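        # use the specialized depthwise transpose kernel only when every channel forms its own group and cuDNN is disabled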
if (self._num_channels == self._groups and
self._num_filters == self._num_channels and
not self._use_cudnn):
self._op_type = 'depthwise_conv2d_transpose'
else:
self._op_type = 'conv2d_transpose'
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._stride = utils.convert_to_list(self._stride, 2, 'stride')
self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation')
self._filter_size = utils.convert_to_list(
self._filter_size, 2, 'conv2d_transpose.filter_size')
if self._output_size is None:
self._output_size = []
elif isinstance(self._output_size, list) or isinstance(
self._output_size, int):
self._output_size = utils.convert_to_list(self._output_size, 2,
'output_size')
else:
raise ValueError("output_size should be list or int")
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._groups = 1 if self._groups is None else self._groups
filter_shape = [self._num_channels, self._num_filters // self._groups
] + self._filter_size
self.weight = self.create_parameter(
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
if in_dygraph_mode():
op = getattr(core.ops, self._op_type)
out = op(input, self.weight, 'output_size', self._output_size,
'strides', self._stride, 'paddings', self._padding,
'dilations', self._dilation, 'groups', self._groups,
'use_cudnn', self._use_cudnn)
pre_bias = out
pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias,
1)
return dygraph_utils._append_activation_in_dygraph(
pre_act, act=self._act)
check_variable_and_dtype(input, 'input',
['float16', 'float32', 'float64'],
"Conv2DTranspose")
inputs = {'Input': [input], 'Filter': [self.weight]}
attrs = {
'output_size': self._output_size,
'strides': self._stride,
'paddings': self._padding,
'dilations': self._dilation,
'groups': self._groups,
'use_cudnn': self._use_cudnn
}
pre_bias = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
self._helper.append_op(
type=self._op_type,
inputs=inputs,
outputs={'Output': pre_bias},
attrs=attrs)
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
out = self._helper.append_activation(pre_act, act=self._act)
return out
class SequenceConv(layers.Layer):
"""
This function creates the op for sequence_conv, using the inputs and
other convolutional configurations for the filters and stride as given
in the input parameters to the function.
Parameters:
name_scope(str): The name of this class.
num_filters (int): number of filters.
filter_size (int): the filter size (H and W). Default: 3.
filter_stride (int): stride of the filter. Default: 1.
padding (bool|None): if True, add paddings. Default: None
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: output of sequence_conv
"""
def __init__(self,
name_scope,
num_filters,
filter_size=3,
filter_stride=1,
padding=None,
bias_attr=None,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "SequenceConv is not supported by dynamic graph mode yet!"
super(SequenceConv, self).__init__(name_scope)
self._num_filters = num_filters
self._filter_size = filter_size
self._filter_stride = filter_stride
self._padding = padding
self._bias_attr = bias_attr
self._param_attr = param_attr
self._act = act
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._filter_size * input.shape[1], self._num_filters]
self.weight = self.create_parameter(
attr=self._param_attr, shape=filter_shape, dtype=self._dtype)
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='sequence_conv',
inputs={
'X': [input],
'Filter': [self.weight],
},
outputs={"Out": pre_bias},
attrs={
'contextStride': self._filter_stride,
'contextStart': -int(self._filter_size // 2),
'contextLength': self._filter_size
})
if self.bias is not None:
pre_act = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self.bias]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
class RowConv(layers.Layer):
"""
***Row-convolution operator***
The row convolution is called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2:
http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf
The main motivation is that a bidirectional RNN, useful in DeepSpeech like speech models, learns representation for a sequence by performing a
forward and a backward pass through the entire sequence. However, unlike
unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
and low-latency setting. The lookahead convolution incorporates information
from future subsequences in a computationally efficient manner to improve
unidirectional recurrent neural networks. The row convolution operator is
different from the 1D sequence convolution, and is computed as follows:
Given an input sequence X of length t and input dimension D, and a filter (W) of size context * D.
More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
Parameters:
name_scope(str): The name of this class.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
param_attr (ParamAttr): Attributes of parameters, including
name, initializer etc. Default: None.
act (str): Non-linear activation to be applied to output variable. Default: None.
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
the output(Out) is a LodTensor, which supports variable time-length input sequences.
The underlying tensor in this LodTensor is a matrix with shape T x N, i.e., the same shape as X.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
x = numpy.random.random((16)).astype('float32')
rowConv = fluid.dygraph.nn.RowConv(
'RowConv', future_context_size=2)
ret = rowConv(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
name_scope,
future_context_size,
param_attr=None,
act=None):
assert not in_dygraph_mode(
), "RowConv is not supported by dynamic graph mode yet!"
super(RowConv, self).__init__(name_scope)
self._act = act
self._param_attr = param_attr
self._future_context_size = future_context_size
def _build_once(self, input):
self._dtype = self._helper.input_dtype(input)
filter_shape = [self._future_context_size + 1, input.shape[1]]
self.weight = self.create_parameter(
attr=self._param_attr,
shape=filter_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type='row_conv',
inputs={'X': [input],
'Filter': [self.weight]},
outputs={'Out': [out]})
return self._helper.append_activation(out, act=self._act)
class GroupNorm(layers.Layer):
"""
:alias_main: paddle.nn.GroupNorm
:alias: paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm
:old_api: paddle.fluid.dygraph.GroupNorm
This interface is used to construct a callable object of the ``GroupNorm`` class.
For more details, refer to code examples.
It implements the function of the Group Normalization Layer.
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
channels(int): The number of channels of input.
groups(int): The number of groups that divided from channels.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
param_attr(ParamAttr, optional): The parameter attribute for the learnable
scale :math:`g`. If it is set to False, no scale will be added to the output units.
            If it is set to None, the scale is initialized to one. Default: None.
bias_attr(ParamAttr, optional): The parameter attribute for the learnable
bias :math:`b`. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act(str, optional): Activation to be applied to the output of group normalization. Default: None.
data_layout(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = np.random.random((8, 32, 32)).astype('float32')
groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
ret = groupNorm(fluid.dygraph.base.to_variable(x))
"""
def __init__(self,
channels,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
dtype='float32'):
super(GroupNorm, self).__init__()
self._param_attr = param_attr
self._bias_attr = bias_attr
self._epsilon = epsilon
self._channels = channels
self._groups = groups
self._act = act
self._dtype = dtype
if data_layout != 'NCHW':
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [self._channels]
self.weight = self.create_parameter(
attr=self._param_attr or False,
shape=param_shape,
dtype=self._dtype,
default_initializer=Constant(1.0))
self.bias = self.create_parameter(
attr=self._bias_attr or False,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
def forward(self, input):
inputs = {'X': input}
if self.bias is not None:
inputs['Bias'] = self.bias
if self.weight is not None:
inputs['Scale'] = self.weight
# create output
mean_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
variance_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
group_norm_out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": self._epsilon,
"groups": self._groups})
return self._helper.append_activation(group_norm_out, self._act)
class SpectralNorm(layers.Layer):
"""
:alias_main: paddle.nn.SpectralNorm
:alias: paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm
:old_api: paddle.fluid.dygraph.SpectralNorm
This interface is used to construct a callable object of the ``SpectralNorm`` class.
For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.
This layer calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
    Parameters. Calculations are shown as follows.
Step 1:
Generate vector U in shape of [H], and V in shape of [W].
While H is the :attr:`dim` th dimension of the input weights,
and W is the product result of remaining dimensions.
Step 2:
:attr:`power_iters` should be a positive integer, do following
calculations with U and V for :attr:`power_iters` rounds.
.. math::
        \mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}
        \mathbf{u} := \\frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}
Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.
.. math::
\sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}
\mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
Parameters:
weight_shape(list or tuple): The shape of weight parameter.
dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.
eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
weight = np.random.random((2, 8, 32, 32)).astype('float32')
spectralNorm = fluid.dygraph.nn.SpectralNorm(weight.shape, dim=1, power_iters=2)
ret = spectralNorm(fluid.dygraph.base.to_variable(weight))
"""
def __init__(self,
weight_shape,
dim=0,
power_iters=1,
eps=1e-12,
dtype='float32'):
super(SpectralNorm, self).__init__()
self._power_iters = power_iters
self._eps = eps
self._dim = dim
self._dtype = dtype
self._weight_shape = list(weight_shape)
h = self._weight_shape[self._dim]
w = np.prod(self._weight_shape) // h
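        # h is the size of the dimension moved to the front; w is the product of the remaining dimensions of the reshaped weight matrix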
self.weight_u = self.create_parameter(
attr=ParamAttr(),
shape=[h],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.weight_u.stop_gradient = True
self.weight_v = self.create_parameter(
attr=ParamAttr(),
shape=[w],
dtype=self._dtype,
default_initializer=Normal(0., 1.))
self.weight_v.stop_gradient = True
def forward(self, weight):
check_variable_and_dtype(weight, "weight", ['float32', 'float64'],
'SpectralNorm')
inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v}
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="spectral_norm",
inputs=inputs,
outputs={"Out": out, },
attrs={
"dim": self._dim,
"power_iters": self._power_iters,
"eps": self._eps,
})
return out
class TreeConv(layers.Layer):
"""
This interface is used to construct a callable object of the ``TreeConv`` class.
For more details, refer to code examples.
Tree-Based Convolution is a kind of convolution based on tree structure.
Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),
which is used to classify tree structures, such as Abstract Syntax Tree.
    Tree-Based Convolution proposes a kind of data structure called continuous binary tree,
    which regards a multiway tree as a binary tree.
The paper of Tree-Based Convolution Operator is here: `tree-based convolution <https://arxiv.org/abs/1409.5718v1/>`_ .
Parameters:
feature_size(int): last dimension of nodes_vector.
output_size(int): output feature width.
num_filters(int, optional): number of filters, Default: 1.
max_depth(int, optional): max depth of filters, Default: 2.
act(str, optional): activation function, Default: tanh.
param_attr(ParamAttr, optional): the parameter attribute for the filters, Default: None.
bias_attr(ParamAttr, optional): the parameter attribute for the bias of this layer, Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
dtype (str, optional): Data type, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (Parameter): the learnable weights of filters of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
with fluid.dygraph.guard():
nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')
edge_set = numpy.random.random((1, 9, 2)).astype('int32')
treeConv = fluid.dygraph.nn.TreeConv(
feature_size=5, output_size=6, num_filters=1, max_depth=2)
ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))
"""
def __init__(self,
feature_size,
output_size,
num_filters=1,
max_depth=2,
act='tanh',
param_attr=None,
bias_attr=None,
name=None,
dtype='float32'):
super(TreeConv, self).__init__()
self._name = name
self._feature_size = feature_size
self._output_size = output_size
self._act = act
self._max_depth = max_depth
self._num_filters = num_filters
self._bias_attr = bias_attr
self._param_attr = param_attr
self._dtype = dtype
w_shape = [self._feature_size, 3, self._output_size, self._num_filters]
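        # the 3 corresponds to the three filter components (top, left, right) of the continuous binary tree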
if self._bias_attr:
self.bias = self.create_parameter(
attr=self._bias_attr,
shape=[self._num_filters],
dtype=self._dtype,
is_bias=True)
self.weight = self.create_parameter(
attr=self._param_attr,
shape=w_shape,
dtype=self._dtype,
is_bias=False)
def forward(self, nodes_vector, edge_set):
check_type(nodes_vector, 'nodes_vector', (Variable), 'TreeConv')
check_type(edge_set, 'edge_set', (Variable), 'TreeConv')
if self._name:
out = self.create_variable(
name=self._name, dtype=self._dtype, persistable=False)
else:
out = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='tree_conv',
inputs={
'NodesVector': nodes_vector,
'EdgeSet': edge_set,
'Filter': self.weight
},
outputs={'Out': out, },
attrs={'max_depth': self._max_depth})
if self._bias_attr:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [out],
'Y': [self.bias]},
outputs={'Out': [pre_activation]},
attrs={'axis': 1})
else:
pre_activation = out
return self._helper.append_activation(pre_activation, act=self._act)
class Flatten(layers.Layer):
"""
:alias_main: paddle.nn.Flatten
:alias: paddle.nn.Flatten,paddle.nn.layer.Flatten,paddle.nn.layer.common.Flatten
    This interface is used to construct a callable object of the ``Flatten`` class.
    For more details, refer to code examples.
    It flattens a contiguous range of dims into a tensor.
Parameters:
start_axis(int): first dim to flatten (default = 1)
stop_axis(int): last dim to flatten (default = -1).
Returns:
None
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
inp_np = paddle.to_tensor(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
flatten_res = flatten(inp_np)
"""
def __init__(self, start_axis=1, stop_axis=-1):
super(Flatten, self).__init__()
self.start_axis = start_axis
self.stop_axis = stop_axis
def forward(self, input):
out = paddle.tensor.manipulation.flatten(
input, start_axis=self.start_axis, stop_axis=self.stop_axis)
return out
|
py | 1a488b59626ce409ac46b0f314ed513ff3ff0474 | import os
os.environ['DGLBACKEND'] = 'mxnet'
import mxnet as mx
from mxnet import nd, gluon, autograd
import dgl
import numpy as np
import pandas as pd
import time
import logging
import pickle
import math
from estimator_fns import *
from graph import *
from data import *
from utils import *
from model.mxnet import *
from sampler import *
def normalize(feature_matrix):
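    # column-wise standardization: subtract the mean and divide by the (population) standard deviation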
mean = nd.mean(feature_matrix, axis=0)
stdev = nd.sqrt(nd.sum((feature_matrix - mean)**2, axis=0)/feature_matrix.shape[0])
return (feature_matrix - mean) / stdev
def get_dataloader(data_size, batch_size, mini_batch=True):
batch_size = batch_size if mini_batch else data_size
train_dataloader = gluon.data.BatchSampler(gluon.data.RandomSampler(data_size), batch_size, 'keep')
test_dataloader = gluon.data.BatchSampler(gluon.data.SequentialSampler(data_size), batch_size, 'keep')
return train_dataloader, test_dataloader
def train(model, trainer, loss, features, labels, train_loader, test_loader, train_g, test_g, train_mask, valid_mask, test_mask, ctx, n_epochs, batch_size, output_dir, thresh, scale_pos_weight, compute_metrics=True, mini_batch=True):
duration = []
for epoch in range(n_epochs):
tic = time.time()
loss_val = 0.
for n, batch in enumerate(train_loader):
# logging.info("Iteration: {:05d}".format(n))
node_flow, batch_nids = train_g.sample_block(nd.array(batch).astype('int64'))
batch_indices = nd.array(batch, ctx=ctx)
with autograd.record():
pred = model(node_flow, features[batch_nids.as_in_context(ctx)])
l = loss(pred, labels[batch_indices], mx.nd.expand_dims(scale_pos_weight*train_mask, 1)[batch_indices])
l = l.sum()/len(batch)
l.backward()
trainer.step(batch_size=1, ignore_stale_grad=True)
loss_val += l.asscalar()
# logging.info("Current loss {:04f}".format(loss_val/(n+1)))
duration.append(time.time() - tic)
train_metric, valid_metric = evaluate(model, train_g, features, labels, train_mask, valid_mask, ctx, batch_size, mini_batch)
logging.info("Epoch {:05d} | Time(s) {:.4f} | Training Loss {:.4f} | Training F1 {:.4f} | Validation F1 {:.4f}".format(
epoch, np.mean(duration), loss_val/(n+1), train_metric, valid_metric))
class_preds, pred_proba = get_model_class_predictions(model, test_g, test_loader, features, ctx, threshold=thresh)
if compute_metrics:
acc, f1, p, r, roc, pr, ap, cm = get_metrics(class_preds, pred_proba, labels, test_mask, output_dir)
logging.info("Metrics")
logging.info("""Confusion Matrix:
{}
f1: {:.4f}, precision: {:.4f}, recall: {:.4f}, acc: {:.4f}, roc: {:.4f}, pr: {:.4f}, ap: {:.4f}
""".format(cm, f1, p, r, acc, roc, pr, ap))
return model, class_preds, pred_proba
def evaluate(model, g, features, labels, train_mask, valid_mask, ctx, batch_size, mini_batch=True):
train_f1, valid_f1 = mx.metric.F1(), mx.metric.F1()
preds = []
batch_size = batch_size if mini_batch else features.shape[0]
dataloader = gluon.data.BatchSampler(gluon.data.SequentialSampler(features.shape[0]), batch_size, 'keep')
for batch in dataloader:
node_flow, batch_nids = g.sample_block(nd.array(batch).astype('int64'))
preds.append(model(node_flow, features[batch_nids.as_in_context(ctx)]))
nd.waitall()
# preds = nd.concat(*preds, dim=0).argmax(axis=1)
preds = nd.concat(*preds, dim=0)
train_mask = nd.array(np.where(train_mask.asnumpy()), ctx=ctx)
valid_mask = nd.array(np.where(valid_mask.asnumpy()), ctx=ctx)
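    # reshape(-3, 0) uses MXNet's special reshape codes: -3 merges the leading (1, n) index axes, 0 keeps the class axis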
train_f1.update(preds=nd.softmax(preds[train_mask], axis=1).reshape(-3, 0), labels=labels[train_mask].reshape(-1,))
valid_f1.update(preds=nd.softmax(preds[valid_mask], axis=1).reshape(-3, 0), labels=labels[valid_mask].reshape(-1,))
return train_f1.get()[1], valid_f1.get()[1]
def get_model_predictions(model, g, dataloader, features, ctx):
pred = []
for batch in dataloader:
node_flow, batch_nids = g.sample_block(nd.array(batch).astype('int64'))
pred.append(model(node_flow, features[batch_nids.as_in_context(ctx)]))
nd.waitall()
return nd.concat(*pred, dim=0)
def get_model_class_predictions(model, g, dataloader, features, ctx, threshold=None):
    unnormalized_preds = get_model_predictions(model, g, dataloader, features, ctx)
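    # column 1 of the softmax output is the positive-class probability for each node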
pred_proba = nd.softmax(unnormalized_preds)[:, 1].asnumpy().flatten()
if not threshold:
return unnormalized_preds.argmax(axis=1).asnumpy().flatten().astype(int), pred_proba
return np.where(pred_proba > threshold, 1, 0), pred_proba
def save_prediction(pred, pred_proba, id_to_node, training_dir, new_accounts, output_dir, predictions_file):
prediction_query = read_masked_nodes(os.path.join(training_dir, new_accounts))
pred_indices = np.array([id_to_node[query] for query in prediction_query])
pd.DataFrame.from_dict({'target': prediction_query,
'pred_proba': pred_proba[pred_indices],
'pred': pred[pred_indices]}).to_csv(os.path.join(output_dir, predictions_file),
index=False)
def save_model(g, model, model_dir, hyperparams):
model.save_parameters(os.path.join(model_dir, 'model.params'))
with open(os.path.join(model_dir, 'model_hyperparams.pkl'), 'wb') as f:
pickle.dump(hyperparams, f)
with open(os.path.join(model_dir, 'graph.pkl'), 'wb') as f:
pickle.dump(g, f)
def get_model(g, hyperparams, in_feats, n_classes, ctx, model_dir=None):
if model_dir: # load using saved model state
with open(os.path.join(model_dir, 'model_hyperparams.pkl'), 'rb') as f:
hyperparams = pickle.load(f)
with open(os.path.join(model_dir, 'graph.pkl'), 'rb') as f:
g = pickle.load(f)
if hyperparams['heterogeneous']:
model = HeteroRGCN(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
hyperparams['embedding_size'],
ctx)
else:
if hyperparams['model'] == 'gcn':
model = GCN(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
nd.relu,
hyperparams['dropout'])
elif hyperparams['model'] == 'graphsage':
model = GraphSAGE(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
nd.relu,
hyperparams['dropout'],
hyperparams['aggregator_type'])
else:
heads = ([hyperparams['num_heads']] * hyperparams['n_layers']) + [hyperparams['num_out_heads']]
model = GAT(g,
in_feats,
hyperparams['n_hidden'],
n_classes,
hyperparams['n_layers'],
heads,
gluon.nn.Lambda(lambda data: nd.LeakyReLU(data, act_type='elu')),
hyperparams['dropout'],
hyperparams['attn_drop'],
hyperparams['alpha'],
hyperparams['residual'])
if hyperparams['no_features']:
model = NodeEmbeddingGNN(model, in_feats, hyperparams['embedding_size'])
if model_dir:
model.load_parameters(os.path.join(model_dir, 'model.params'))
else:
model.initialize(ctx=ctx)
return model
if __name__ == '__main__':
logging = get_logger(__name__)
logging.info('numpy version:{} MXNet version:{} DGL version:{}'.format(np.__version__,
mx.__version__,
dgl.__version__))
args = parse_args()
args.edges = get_edgelists(args.edges, args.training_dir)
g, features, id_to_node = construct_graph(args.training_dir, args.edges, args.nodes, args.target_ntype,
args.heterogeneous)
features = normalize(nd.array(features))
if args.heterogeneous:
g.nodes['target'].data['features'] = features
else:
g.ndata['features'] = features
logging.info("Getting labels")
n_nodes = g.number_of_nodes('target') if args.heterogeneous else g.number_of_nodes()
labels, train_mask, valid_mask, test_mask = get_labels(
id_to_node,
n_nodes,
args.target_ntype,
os.path.join(args.training_dir, args.labels),
os.path.join(args.training_dir, args.validation_data),
os.path.join(args.training_dir, args.new_accounts),
)
logging.info("Got labels")
labels = nd.array(labels).astype('float32')
train_mask = nd.array(train_mask).astype('float32')
valid_mask = nd.array(valid_mask).astype('float32')
test_mask = nd.array(test_mask).astype('float32')
n_nodes = sum([g.number_of_nodes(n_type) for n_type in g.ntypes]) if args.heterogeneous else g.number_of_nodes()
n_edges = sum([g.number_of_edges(e_type) for e_type in g.etypes]) if args.heterogeneous else g.number_of_edges()
logging.info("""----Data statistics------'
#Nodes: {}
#Edges: {}
#Features Shape: {}
#Labeled Train samples: {}
#Unlabeled Test samples: {}""".format(n_nodes,
n_edges,
features.shape,
train_mask.sum().asscalar(),
test_mask.sum().asscalar()))
if args.num_gpus:
cuda = True
ctx = mx.gpu(0)
else:
cuda = False
ctx = mx.cpu(0)
logging.info("Initializing Model")
in_feats = args.embedding_size if args.no_features else features.shape[1]
n_classes = 2
model = get_model(g, vars(args), in_feats, n_classes, ctx)
logging.info("Initialized Model")
if args.no_features:
features = nd.array(g.nodes('target'), ctx) if args.heterogeneous else nd.array(g.nodes(), ctx)
else:
features = features.as_in_context(ctx)
labels = labels.as_in_context(ctx)
train_mask = train_mask.as_in_context(ctx)
valid_mask = valid_mask.as_in_context(ctx)
test_mask = test_mask.as_in_context(ctx)
if not args.heterogeneous:
# normalization
degs = g.in_degrees().astype('float32')
norm = mx.nd.power(degs, -0.5)
if cuda:
norm = norm.as_in_context(ctx)
g.ndata['norm'] = mx.nd.expand_dims(norm, 1)
if args.mini_batch:
train_g = HeteroGraphNeighborSampler(g, 'target', args.n_layers, args.n_neighbors) if args.heterogeneous\
else NeighborSampler(g, args.n_layers, args.n_neighbors)
test_g = HeteroGraphNeighborSampler(g, 'target', args.n_layers) if args.heterogeneous\
else NeighborSampler(g, args.n_layers)
else:
train_g, test_g = FullGraphSampler(g, args.n_layers), FullGraphSampler(g, args.n_layers)
train_data, test_data = get_dataloader(features.shape[0], args.batch_size, args.mini_batch)
loss = gluon.loss.SoftmaxCELoss()
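    # uniform per-sample loss weight: sqrt of (#nodes outside the train mask / #nodes inside it)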
scale_pos_weight = nd.sqrt((train_mask.shape[0] - train_mask.sum()) / train_mask.sum())
logging.info(model)
logging.info(model.collect_params())
trainer = gluon.Trainer(model.collect_params(), args.optimizer, {'learning_rate': args.lr, 'wd': args.weight_decay})
logging.info("Starting Model training")
model, pred, pred_proba = train(model, trainer, loss, features, labels, train_data, test_data, train_g, test_g,
train_mask, valid_mask, test_mask, ctx, args.n_epochs, args.batch_size, args.output_dir,
args.threshold, scale_pos_weight, args.compute_metrics, args.mini_batch)
logging.info("Finished Model training")
logging.info("Saving model")
save_model(g, model, args.model_dir, vars(args))
logging.info("Saving model predictions for new accounts")
save_prediction(pred, pred_proba, id_to_node, args.training_dir, args.new_accounts, args.output_dir, args.predictions)
|
py | 1a488b72db5f9cbb4dd6c69bbe4724d32d8d73f6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from django_fsm import transition, RETURN_VALUE
from shop.models.order import BaseOrder, OrderModel
from .base import PaymentProvider
class ForwardFundPayment(PaymentProvider):
"""
Provides a simple prepayment payment provider.
"""
namespace = 'forward-fund-payment'
def get_payment_request(self, cart, request):
order = OrderModel.objects.create_from_cart(cart, request)
order.populate_from_cart(cart, request)
if order.total == 0:
order.no_payment_required()
else:
order.awaiting_payment()
order.save()
thank_you_url = OrderModel.objects.get_latest_url()
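        # the returned string is a client-side expression (AngularJS $window) that
        # redirects the browser to the thank-you page once the payment request is handled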
return '$window.location.href="{}";'.format(thank_you_url)
class ManualPaymentWorkflowMixin(object):
"""
Add this class to `settings.SHOP_ORDER_WORKFLOWS` to mix it into your `OrderModel`.
It adds all the methods required for state transitions, when used with the
`ForwardFundPayment` provider from above.
"""
TRANSITION_TARGETS = {
'awaiting_payment': _("Awaiting a forward fund payment"),
'prepayment_deposited': _("Prepayment deposited"),
'no_payment_required': _("No Payment Required"),
}
def __init__(self, *args, **kwargs):
if not isinstance(self, BaseOrder):
raise ImproperlyConfigured("class 'ManualPaymentWorkflowMixin' is not of type 'BaseOrder'")
CancelOrderWorkflowMixin.CANCELABLE_SOURCES.update(['awaiting_payment', 'prepayment_deposited',
'no_payment_required'])
super(ManualPaymentWorkflowMixin, self).__init__(*args, **kwargs)
def is_fully_paid(self):
return super(ManualPaymentWorkflowMixin, self).is_fully_paid()
@transition(field='status', source=['created'], target='no_payment_required')
def no_payment_required(self):
"""
Signals that an Order can proceed directly, by confirming a payment of value zero.
"""
@transition(field='status', source=['created'], target='awaiting_payment')
def awaiting_payment(self):
"""
Signals that the current Order awaits a payment.
Invoked by ForwardFundPayment.get_payment_request.
"""
def deposited_too_little(self):
return self.amount_paid > 0 and self.amount_paid < self.total
@transition(field='status', source=['awaiting_payment'], target='awaiting_payment',
conditions=[deposited_too_little], custom=dict(admin=True, button_name=_("Deposited too little")))
def prepayment_partially_deposited(self):
"""
Signals that the current Order received a payment, which was not enough.
"""
@transition(field='status', source=['awaiting_payment'], target='prepayment_deposited',
conditions=[is_fully_paid], custom=dict(admin=True, button_name=_("Mark as Paid")))
def prepayment_fully_deposited(self):
"""
Signals that the current Order received a payment, which fully covers the requested sum.
"""
@transition(field='status', source=['prepayment_deposited', 'no_payment_required'],
custom=dict(auto=True))
def acknowledge_prepayment(self):
"""
Acknowledge the payment. This method is invoked automatically.
"""
self.acknowledge_payment()
@transition(field='status', source='refund_payment', target=RETURN_VALUE('refund_payment', 'order_canceled'),
custom=dict(admin=True, button_name=_("Mark as Refunded")))
def payment_refunded(self):
"""
Signals that the payment for this Order has been refunded manually.
"""
return 'refund_payment' if self.amount_paid else 'order_canceled'
class CancelOrderWorkflowMixin(object):
"""
Add this class to `settings.SHOP_ORDER_WORKFLOWS` to mix it into your `OrderModel`.
It adds all the methods required for state transitions, to cancel an order.
"""
CANCELABLE_SOURCES = {'new', 'created', 'payment_confirmed', 'payment_declined'}
TRANSITION_TARGETS = {
'refund_payment': _("Refund payment"),
'order_canceled': _("Order Canceled"),
}
def cancelable(self):
return self.status in self.CANCELABLE_SOURCES
@transition(field='status', target=RETURN_VALUE(*TRANSITION_TARGETS.keys()),
conditions=[cancelable], custom=dict(admin=True, button_name=_("Cancel Order")))
def cancel_order(self):
"""
Signals that an Order shall be canceled.
"""
if self.amount_paid:
self.refund_payment()
return 'refund_payment' if self.amount_paid else 'order_canceled'
|
py | 1a488bea775b2b2dcd23dc2ef71eb70a38d3f5fd | """
Rate expressions only for A+B=R (ABtoR)
"""
import numpy as np
from pmutt import constants as c
species_names = ['A', 'B', 'R']
#%% Define the form of the rate constant
class RateConstant():
def __init__(self, name = 'k'):
self.name = name
def value(self, para_dict, temperature=None, energy_unit='eV'):
if temperature is None:
k_value = para_dict[self.name] # input is log10(prefactor)
else:
# based on the unit of Ea, must be J, kJ, cal, kcal, eV etc.
# set the unit for kb
kb_unit = energy_unit + '/K'
prefactor = para_dict[self.name+'_prefactor']
Ea = 10**(para_dict[self.name+'_Ea']) # input is log10(Ea)
k_value = prefactor * np.exp(-Ea/c.kb(kb_unit)/temperature)
return k_value
#%% Define all groups in the table as dictionaries
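# In the Yang-Hougen (Hougen-Watson / LHHW) formulation used below, a rate is
# assembled as rate = (kinetic group) * (driving force group) / (adsorption group) ** n,
# with the exponent n taken from the `exponents` table defined further down.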
#%%
# Driving force group (DFG)
def driving_surface_reaction_controlling(concentrations, para_dict, temperature=None):
K = RateConstant('K').value(para_dict, temperature)
return concentrations[0]*concentrations[1] - concentrations[2]/K
def driving_adsorption_controlling_w_dissociation(concentrations, para_dict, temperature=None):
K = RateConstant('K').value(para_dict, temperature)
return concentrations[0] - concentrations[2]/concentrations[1]/K
driving_force_groups = {'surface reaction controlling': driving_surface_reaction_controlling,
'adsorption controlling': driving_adsorption_controlling_w_dissociation}
#%%
# Kinetic group
def kinetic_surface_reaction_controlling(para_dict, temperature=None):
ksr = RateConstant('ksr').value(para_dict, temperature)
KA = RateConstant('KA').value(para_dict, temperature)
KB = RateConstant('KB').value(para_dict, temperature)
return ksr*KA*KB
def kinetic_adsorption_controlling_w_dissociation(para_dict, species = 'A', temperature=None):
KA = RateConstant('K'+species).value(para_dict, temperature)
return KA
kinetic_groups = {'surface reaction controlling': kinetic_surface_reaction_controlling,
'adsorption controlling with dissociation': kinetic_adsorption_controlling_w_dissociation}
#%%
# Adsorption group
def adsorption_default(concentrations, para_dict, species = 'A', temperature=None):
Kx = RateConstant('K'+species).value(para_dict, temperature)
return Kx*concentrations[species_names.index(species)]
def adsorption_equilibrium_w_dissociation(concentrations, para_dict, species = 'A', temperature=None):
Kx = RateConstant('K'+species).value(para_dict, temperature)
return np.sqrt(Kx*concentrations[species_names.index(species)])
def adsorption_controlling_w_dissociation(concentrations, para_dict, species = 'A', temperature=None):
Kx = RateConstant('K'+species).value(para_dict, temperature)
K = RateConstant('K').value(para_dict, temperature)
return np.sqrt(Kx*concentrations[species_names.index('R')]/K/concentrations[species_names.index('B')])
adsorption_groups = {'adsorption default': adsorption_default,
                     'adsorption equilibrium with dissociation': adsorption_equilibrium_w_dissociation,
'adsorption controlling with dissociation': adsorption_controlling_w_dissociation}
# Exponents of adsorption groups
exponents = {'surface reaction controlling': {'dissociation': 3},
'adsorption controlling with dissociation': 2}
#%% Define the rate expressions
# General rate experssion
def general_rate(concentrations, para_dict, stoichiometry=None, name=None, temperature=None):
"""Rate expressions from Yang and Hougen
"""
controling_key = 'surface reaction controlling'
    ads_key = 'adsorption equilibrium with dissociation'
surface_reaction_key = 'dissociation'
adsorption_terms = (1 + adsorption_groups[ads_key](concentrations, para_dict, 'A', temperature) + \
adsorption_groups[ads_key](concentrations, para_dict, 'B', temperature))**exponents[controling_key][surface_reaction_key]
rate = driving_force_groups[controling_key](concentrations, para_dict, temperature) * \
kinetic_groups[controling_key](para_dict, temperature)/adsorption_terms
return rate
def general_rate_ads(concentrations, para_dict, stoichiometry=None, name=None, temperature=None):
"""Rate expressions from Yang and Hougen
"""
controling_key = 'adsorption controlling'
ads_key = 'adsorption controlling with dissociation'
#surface_reaction_key = 'dissociation'
adsorption_terms = (1 + adsorption_groups[ads_key](concentrations, para_dict, 'A', temperature) + \
adsorption_groups['adsorption default'](concentrations, para_dict, 'B', temperature))**exponents[ads_key]
rate = driving_force_groups[controling_key](concentrations, para_dict, temperature) * \
        kinetic_groups[ads_key](para_dict, 'A', temperature)/adsorption_terms
return rate
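# Illustrative usage sketch (not part of the original module; the numeric values
# and rate-constant entries below are hypothetical and only show the dictionary
# keys each expression expects when temperature=None, i.e. when rate constants
# are read directly from para_dict):
#
#     concentrations = np.array([1.0, 0.5, 0.1])   # ordered as species_names = ['A', 'B', 'R']
#     para_dict = {'K': 10.0, 'ksr': 1.0, 'KA': 0.5, 'KB': 0.5}
#     r_sr = general_rate(concentrations, para_dict)       # surface reaction controlling
#     r_ads = general_rate_ads(concentrations, para_dict)  # adsorption controlling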
|
py | 1a488c93e036bb7c79183a02a162711d300605a5 | """
NOTE: This file is not using to.testing.assert_allclose because most methods need to work for both torch and numpy.
"""
import pytest
import numpy as np
import torch as to
import itertools
import pickle
from typing import NamedTuple
from pyrado.algorithms.utils import ReplayMemory
from pyrado.sampling.step_sequence import StepSequence
from pyrado.sampling.data_format import to_format
from pyrado.sampling.step_sequence import discounted_value, gae_returns
from pyrado.sampling.rollout import rollout
from pyrado.environments.pysim.ball_on_beam import BallOnBeamSim
rewards = [
-200,
-100,
-50,
-25,
-17.5,
]
# Observations has one additional element
observations = [
np.array([3, 2, 7]),
np.array([3, 1, 7]),
np.array([2, 0, 7]),
np.array([3, 1, 3]),
np.array([0, 2, 4]),
np.array([1, 1, 1]),
]
# Actions come from PyTorch
actions = [
to.tensor([0, 1]),
to.tensor([0, 3]),
to.tensor([2, 4]),
to.tensor([3, 1]),
to.tensor([0, 0]),
]
# Policy infos as dict collapse test
policy_infos = [
{'mean': np.array([0, 1]), 'std': 0.4},
{'mean': np.array([0, 3]), 'std': 0.2},
{'mean': np.array([2, 4]), 'std': 0.1},
{'mean': np.array([3, 1]), 'std': 0.05},
{'mean': np.array([0, 0]), 'std': 0.025},
]
# Hidden is a tuple, like we see with LSTMs
hidden = [
(np.array([3, 2, 7]), np.array([2, 1])),
(np.array([4, 9, 8]), np.array([5, 6])),
(np.array([1, 4, 9]), np.array([7, 3])),
(np.array([0, 8, 2]), np.array([4, 9])),
(np.array([2, 7, 6]), np.array([8, 0])),
]
def test_create_rew_only():
# Don't require additional fields for this test
StepSequence.required_fields = {}
ro = StepSequence(rewards=rewards, data_format='numpy')
assert len(ro) == 5
assert (ro.rewards == np.array(rewards)).all()
@pytest.mark.parametrize(
'data_format, tensor_type', [('numpy', np.ndarray), ('torch', to.Tensor)], ids=['numpy', 'torch']
)
def test_create(data_format, tensor_type):
# With actions, observations and dicts
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
assert len(ro) == 5
assert isinstance(ro.rewards, tensor_type)
assert isinstance(ro.observations, tensor_type)
assert isinstance(ro.actions, tensor_type)
assert isinstance(ro.policy_infos['mean'], tensor_type)
assert isinstance(ro.policy_infos['std'], tensor_type)
assert isinstance(ro.hidden[0], tensor_type)
# Done should always be a ndarray
assert isinstance(ro.done, np.ndarray)
assert not ro.done[:-1].any()
assert ro.done[-1]
@pytest.mark.parametrize(
'other_format, tensor_type', [('torch', np.ndarray), ('numpy', to.Tensor)],
ids=['numpy to torch', 'torch to numpy']
)
def test_convert(other_format, tensor_type):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=other_format)
# convert
if other_format == 'numpy':
ro.torch()
elif other_format == 'torch':
ro.numpy()
# Verify
assert isinstance(ro.rewards, tensor_type)
assert isinstance(ro.observations, tensor_type)
assert isinstance(ro.actions, tensor_type)
assert isinstance(ro.policy_infos['mean'], tensor_type)
assert isinstance(ro.policy_infos['std'], tensor_type)
assert isinstance(ro.hidden[0], tensor_type)
# Done should always be a ndarray
assert isinstance(ro.done, np.ndarray)
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_step_iter(data_format):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
assert len(ro) == 5
for i, step in enumerate(ro):
assert step.reward == rewards[i]
# Check current and next
assert (step.observation == to_format(observations[i], data_format)).all()
assert (step.next_observation == to_format(observations[i + 1], data_format)).all()
# Check dict sub element
assert (step.policy_info.mean == to_format(policy_infos[i]['mean'], data_format)).all()
assert (step.hidden[0] == to_format(hidden[i][0], data_format)).all()
@pytest.mark.parametrize(
'sls', [slice(2, 4), slice(2, 5, 2), slice(3), slice(4, None)]
)
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_slice(sls, data_format):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
# Slice rollout
sliced = ro[sls]
# Slice reward list for verification
sliced_rew = rewards[sls]
for i, step in enumerate(sliced):
assert step.reward == sliced_rew[i]
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_add_data(data_format):
ro = StepSequence(
rewards=rewards,
observations=observations,
actions=actions,
policy_infos=policy_infos,
hidden=hidden,
data_format=data_format
)
# Add a data field
ro.add_data('return', discounted_value(ro, 0.9))
assert hasattr(ro, 'return')
# Query new data field from steps
assert abs(ro[2]['return'] - -86.675) < 0.01
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_concat(data_format):
# Create some rollouts with random rewards
ros = [
StepSequence(
rewards=np.random.randn(5),
observations=np.random.randn(6),
actions=np.random.randn(5),
policy_infos={'mean': np.random.randn(5)},
hidden=(np.random.randn(5), np.random.randn(5)),
data_format=data_format
),
StepSequence(
rewards=np.random.randn(5),
observations=np.random.randn(6),
actions=np.random.randn(5),
policy_infos={'mean': np.random.randn(5)},
hidden=(np.random.randn(5), np.random.randn(5)),
data_format=data_format
)
]
# Perform concatenation
cat = StepSequence.concat(ros)
assert cat.continuous
assert cat.rollout_count == 2
# Check steps
for step_ro, step_cat in zip(itertools.chain.from_iterable(ros), cat):
assert step_ro.reward == step_cat.reward
assert step_ro.observation == step_cat.observation
assert step_ro.done == step_cat.done
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_split_multi(data_format):
# Don't require additional fields for this test
StepSequence.required_fields = {}
ro = StepSequence(
rewards=np.arange(20),
rollout_bounds=[0, 4, 11, 17, 20],
data_format=data_format
)
# There should be four parts
assert ro.rollout_count == 4
# Of these sizes
assert list(ro.rollout_lengths) == [4, 7, 6, 3]
# Test selecting one
s1 = ro.get_rollout(1)
assert s1.rollout_count == 1
assert s1[0].reward == ro[4].reward
# Test selecting a slice
s2 = ro.get_rollout(slice(1, -1))
assert s2.rollout_count == 2
assert s2[0].reward == ro[4].reward
assert s2[7].reward == ro[11].reward
# Test selecting by list
s2 = ro.get_rollout([1, 3])
assert s2.rollout_count == 2
assert s2[0].reward == ro[4].reward
assert s2[7].reward == ro[17].reward
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_pickle(data_format):
ro = StepSequence(rewards=rewards, observations=observations, actions=actions, policy_infos=policy_infos,
hidden=hidden, data_format=data_format)
# Pickle/unpickle
ro2 = pickle.loads(pickle.dumps(ro, pickle.HIGHEST_PROTOCOL))
for step, step_pi in zip(ro, ro2):
assert step.reward == step_pi.reward
assert (step.observation == step_pi.observation).all()
assert (step.action == step_pi.action).all()
assert step.done == step_pi.done
@pytest.mark.parametrize(
'env', [
BallOnBeamSim(dt=0.01, max_steps=200),
], ids=['bob_linpol']
)
def test_advantage_calculation(env, linear_policy):
ro = rollout(env, linear_policy)
gamma = 0.99
lamb = 0.95
# Add dummy values
values = np.ones_like(ro.rewards)
if not ro.done[-1]:
        # append a bootstrap value of 0 for the final state; values is a numpy array here
        values = np.concatenate([values, [0]])
ro.add_data('values', values)
gae1 = gae_returns(ro, gamma, lamb)
# Compute the advantages
gae2 = np.empty_like(values)
for k in reversed(range(ro.length)):
if ro[k].done:
gae2[k] = ro[k].reward - values[k]
else:
gae2[k] = ro[k].reward + gamma*values[k + 1] - values[k] + \
gamma*lamb*gae2[k + 1]
assert (gae1 == gae2).all()
@pytest.mark.replay
@pytest.mark.parametrize(
'capacity', [
1, 2, 8,
], ids=['1', '2', '8']
)
def test_replay_memory(capacity):
rm = ReplayMemory(capacity)
# Create fake rollouts (of length 5)
ro1 = StepSequence(rewards=rewards, observations=observations, actions=actions, hidden=hidden)
ro2 = StepSequence(rewards=rewards, observations=observations, actions=actions, hidden=hidden)
# Concatenate them for testing only
ros = StepSequence.concat([ro1, ro2], truncate_last=True) # same truncate_last behavior as push function
# Check the lengths
rm.push(ro1)
assert len(rm) == len(ro1) or len(rm) == capacity
rm.push(ro2)
    assert len(rm) == len(ro1) + len(ro2) or len(rm) == capacity
# Check the elements
shift = len(ros) - capacity
if shift < len(ro1):
assert all(rm.memory.observations[0] == ros.observations[shift])
assert all(rm.memory.observations[-1] == ro2.observations[-2]) # -2 since one was truncated
# A dummy namedtuple for testing
class DummyNT(NamedTuple):
part1: to.Tensor
part2: to.Tensor
@pytest.mark.parametrize(
'data_format', ['numpy', 'torch']
)
def test_namedtuple(data_format):
hid_nt = [DummyNT(*it) for it in hidden]
ro = StepSequence(
rewards=rewards,
hidden=hid_nt,
data_format=data_format
)
assert isinstance(ro.hidden, DummyNT)
for i, step in enumerate(ro):
assert isinstance(step.hidden, DummyNT)
assert (step.hidden.part1 == to_format(hid_nt[i].part1, data_format)).all()
|
py | 1a488ca12c7ee52d974349be64557eaf04ca0742 | import types
import pytest
from plenum.common.exceptions import UnauthorizedClientRequest
from plenum.test.batching_3pc.helper import checkNodesHaveSameRoots
from plenum.test.helper import sendRandomRequests, checkRejectWithReason, waitForSufficientRepliesForRequests
from stp_core.loop.eventually import eventually
from plenum.common.exceptions import InvalidClientRequest
from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_random_and_check
from plenum.common.request import Request
def testRequestStaticValidation(tconf, looper,txnPoolNodeSet,
sdk_wallet_client):
"""
Check that for requests which fail static validation, REQNACK is sent
:return:
"""
node = txnPoolNodeSet[0]
req = sdk_sign_request_from_dict(looper, sdk_wallet_client, {'something': 'nothing'})
req = Request(**req)
with pytest.raises(InvalidClientRequest):
node.doStaticValidation(req)
def test3PCOverBatchWithThresholdReqs(tconf, looper, txnPoolNodeSet, client,
sdk_wallet_client, sdk_pool_handle):
"""
Check that 3 phase commit happens when threshold number of requests are
received and propagated.
:return:
"""
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize)
def test3PCOverBatchWithLessThanThresholdReqs(tconf, looper, txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle):
"""
Check that 3 phase commit happens when threshold number of requests are
not received but threshold time has passed
:return:
"""
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize - 1)
def testTreeRootsCorrectAfterEachBatch(tconf, looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client):
"""
Check if both state root and txn tree root are correct and same on each
node after each batch
:return:
"""
# Send 1 batch
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize)
checkNodesHaveSameRoots(txnPoolNodeSet)
# Send 2 batches
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2 * tconf.Max3PCBatchSize)
checkNodesHaveSameRoots(txnPoolNodeSet)
def testRequestDynamicValidation(tconf, looper, txnPoolNodeSet,
client, wallet1):
"""
Check that for requests which fail dynamic (state based) validation,
REJECT is sent to the client
:return:
"""
# TODO: Change this test for using SDK.
# Now SDK, can't distinguish REJECTED messages and simply raise IndyError
origMethods = []
names = {node.name: 0 for node in txnPoolNodeSet}
def rejectingMethod(self, req):
names[self.name] += 1
# Raise rejection for last request of batch
if tconf.Max3PCBatchSize - names[self.name] == 0:
raise UnauthorizedClientRequest(req.identifier,
req.reqId,
'Simulated rejection')
for node in txnPoolNodeSet:
origMethods.append(node.doDynamicValidation)
node.doDynamicValidation = types.MethodType(rejectingMethod, node)
reqs = sendRandomRequests(wallet1, client, tconf.Max3PCBatchSize)
waitForSufficientRepliesForRequests(looper, client, requests=reqs[:-1])
with pytest.raises(AssertionError):
waitForSufficientRepliesForRequests(looper, client, requests=reqs[-1:])
for node in txnPoolNodeSet:
looper.run(eventually(checkRejectWithReason, client,
'Simulated rejection', node.clientstack.name,
retryWait=1))
for i, node in enumerate(txnPoolNodeSet):
node.doDynamicValidation = origMethods[i]
|
py | 1a488ca8d35562ecf256b35cf37f01e6a67a7399 | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sanic import Blueprint
from sanic import response
from trial_rest_api.trial_common import transaction as trial_transaction
from trial_rest_api.consent_common import transaction as consent_transaction
from trial_rest_api import general, security_messaging
from trial_rest_api.errors import ApiBadRequest, ApiInternalError
INVESTIGATORS_BP = Blueprint('investigators')
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
# Used
@INVESTIGATORS_BP.get('investigators')
async def get_all_investigators(request):
"""Fetches complete details of all Accounts in state"""
client_key = general.get_request_key_header(request)
investigator_list = await security_messaging.get_investigators(request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN, client_key)
investigator_list_json = []
for address, dp in investigator_list.items():
investigator_list_json.append({
'public_key': dp.public_key,
'name': dp.name
})
return response.json(body={'data': investigator_list_json},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.post('investigators')
async def register_investigator(request):
"""Updates auth information for the authorized account"""
required_fields = ['name']
general.validate_fields(required_fields, request.json)
name = request.json.get('name')
clinic_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()
# Consent network
client_txn = consent_transaction.create_investigator_client(
txn_signer=clinic_signer,
batch_signer=clinic_signer
)
batch, batch_id = consent_transaction.make_batch_and_id([client_txn], clinic_signer)
await security_messaging.add_investigator(
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch])
try:
await security_messaging.check_batch_status(
request.app.config.CONSENT_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
# Trial network
clinic_txn = trial_transaction.create_investigator(
txn_signer=clinic_signer,
batch_signer=clinic_signer,
name=name
)
batch, batch_id = trial_transaction.make_batch_and_id([clinic_txn], clinic_signer)
await security_messaging.add_investigator(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.TIMEOUT,
[batch])
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.get('investigators/import_to_trial_data/<patient_pkey>/<ehr_id>')
async def import_screening_data(request, patient_pkey, ehr_id):
"""Updates auth information for the authorized account"""
res_json = general.get_response_from_ehr(request, "/ehrs/" + patient_pkey + "/" + ehr_id)
investigator_pkey = general.get_request_key_header(request)
client_signer = general.get_signer(request, investigator_pkey)
data_json = res_json['data']
if not data_json:
raise ApiBadRequest("Can not retrieve '" + ehr_id + "' EHR ' for '" + patient_pkey + "' patient")
data_txn = trial_transaction.add_data(
txn_signer=client_signer,
batch_signer=client_signer,
uid=data_json['id'],
height=data_json['height'],
weight=data_json['weight'],
a1c=data_json['A1C'],
fpg=data_json['FPG'],
ogtt=data_json['OGTT'],
rpgt=data_json['RPGT'],
event_time=data_json['event_time'])
batch, batch_id = trial_transaction.make_batch_and_id([data_txn], client_signer)
await security_messaging.import_screening_data(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], investigator_pkey)
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.get('investigators/data')
async def get_all_data_from_investigators(request):
"""Fetches complete details of all Accounts in state"""
client_key = general.get_request_key_header(request)
data_list = await security_messaging.get_data_from_investigators(request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN, client_key)
data_list_json = []
for address, data in data_list.items():
data_list_json.append({
'id': data.id,
'height': data.height,
'weight': data.weight,
'A1C': data.A1C,
'FPG': data.FPG,
'OGTT': data.OGTT,
'RPGT': data.RPGT,
'event_time': data.event_time,
'eligible': data.eligible
})
return response.json(body={'data': data_list_json},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.post('investigators/data/update')
async def update_data(request):
client_key = general.get_request_key_header(request)
required_fields = ['id', 'height', 'weight', 'A1C', 'FPG', 'OGTT', 'RPGT']
general.validate_fields(required_fields, request.json)
uid = request.json.get('id')
height = request.json.get('height')
weight = request.json.get('weight')
A1C = request.json.get('A1C')
FPG = request.json.get('FPG')
OGTT = request.json.get('OGTT')
RPGT = request.json.get('RPGT')
client_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()
client_txn = trial_transaction.update_data(
txn_signer=client_signer,
batch_signer=client_signer,
uid=uid,
height=height,
weight=weight,
a1c=A1C,
fpg=FPG,
ogtt=OGTT,
rpgt=RPGT)
batch, batch_id = trial_transaction.make_batch_and_id([client_txn], client_signer)
await security_messaging.update_investigator(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.get('investigators/request_inform_consent/<patient_pkey>')
async def request_inform_consent(request, patient_pkey):
"""Updates auth information for the authorized account"""
client_key = general.get_request_key_header(request)
client_signer = general.get_signer(request, client_key)
grant_read_ehr_permission_txn = consent_transaction.request_inform_document_consent(
txn_signer=client_signer,
batch_signer=client_signer,
patient_pkey=patient_pkey)
batch, batch_id = trial_transaction.make_batch_and_id([grant_read_ehr_permission_txn], client_signer)
await security_messaging.request_inform_document_consent(
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.CONSENT_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
# Used
@INVESTIGATORS_BP.post('investigators/data/eligible')
async def set_eligible(request):
client_key = general.get_request_key_header(request)
required_fields = ['id', 'eligible']
general.validate_fields(required_fields, request.json)
uid = request.json.get('id')
eligible = bool(request.json.get('eligible'))
client_signer = request.app.config.SIGNER_INVESTIGATOR # .get_public_key().as_hex()
client_txn = trial_transaction.set_eligible(
txn_signer=client_signer,
batch_signer=client_signer,
uid=uid,
eligible=eligible)
batch, batch_id = trial_transaction.make_batch_and_id([client_txn], client_signer)
await security_messaging.set_eligible(
request.app.config.INVESTIGATOR_VAL_CONN,
request.app.config.CONSENT_VAL_CONN,
request.app.config.TIMEOUT,
[batch], client_key)
try:
await security_messaging.check_batch_status(
request.app.config.INVESTIGATOR_VAL_CONN, [batch_id])
except (ApiBadRequest, ApiInternalError) as err:
# await auth_query.remove_auth_entry(
# request.app.config.DB_CONN, request.json.get('email'))
raise err
return response.json(body={'status': general.DONE},
headers=general.get_response_headers())
|
py | 1a488cd5a9a1b596b7c1f05975d443949dd032c3 | import collections as co
import itertools as it
#argument_parser = argparse.ArgumentParser()
#argument_parser.add_argument("jff_path", metavar="jff-file", type=str)
#args = argument_parser.parse_args()
_Original = co.namedtuple('_Original', ('symbol',))
_Term = co.namedtuple('_Term', ('symbol',))
_Bin = co.namedtuple('_Bin', ('string',))
_Start = co.namedtuple('_Start', ())
Grammar = co.namedtuple('Grammar', ('rules',))
def _parse_rule(rule):
tag, leaf, (
(left_tag, left_leaf, left),
(right_tag, right_leaf, right),
) = rule
assert tag == "production"
assert not leaf
assert left_tag == "left"
assert left_leaf
assert right_tag == "right"
assert right_leaf
return left, right
def parse(structure):
assert structure.type == "grammar"
rules = {}
for left, target in map(_parse_rule, structure.body):
assert len(left) == 1
if left not in rules:
rules[left] = set()
if target is None:
rules[left].add(())
continue
rules[left].add(tuple(target))
return rules
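# Illustrative example (assumed, based on the commented usage at the bottom of
# this file): for a JFLAP grammar with productions S -> 0S0 | 1S1 | #, parse()
# would return {'S': {('0', 'S', '0'), ('1', 'S', '1'), ('#',)}}.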
def _copy_rules(rules):
return {left: set(targets) for left, targets in rules.items()}
def _chomsky_normalize_rename(rules):
return {
("original", left): {
tuple(("original", symbol) for symbol in target)
for target in targets
}
for left, targets in rules.items()
}
def _chomsky_normalize_start(rules, start):
    new_rules = _copy_rules(rules)
    new_rules[('start',)] = {(start,)}
    return new_rules
def _compute_symbols(rules):
symbols = set()
for source, targets in rules.items():
for target in targets:
symbols |= set(target)
return symbols
def _chomsky_normalize_term(rules):
new_rules = {
source: {
tuple(
("term", symbol) if symbol not in rules else symbol
for symbol in target
)
for target in targets
}
for source, targets in rules.items()
}
for symbol in _compute_symbols(rules) - set(rules.keys()):
new_rules[("term", symbol)] = {(symbol,)}
return new_rules
def _chomsky_normalize_bin(rules):
new_rules = {}
for source, targets in rules.items():
new_rules[source] = set()
for target in targets:
if len(target) <= 2:
new_rules[source].add(target)
continue
new_rules[source].add((target[0], ("bin", target[1:])))
for symbol_i, symbol in enumerate(target[1:-2], start=1):
new_rules["bin", target[symbol_i:]] = {
(symbol, ("bin", target[symbol_i + 1:]))
}
new_rules["bin", target[-2:]] = {target[-2:]}
return new_rules
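# Sketch of the BIN step on a single rule (illustrative only):
#   _chomsky_normalize_bin({'S': {('A', 'B', 'C', 'D')}})
# returns
#   {'S': {('A', ('bin', ('B', 'C', 'D')))},
#    ('bin', ('B', 'C', 'D')): {('B', ('bin', ('C', 'D')))},
#    ('bin', ('C', 'D')): {('C', 'D')}}
# i.e. right-hand sides longer than two symbols are chained through fresh
# ('bin', suffix) nonterminals.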
def _inline_nullable(string, symbol):
if symbol not in string:
yield string
return
index = string.index(symbol)
for rest in _inline_nullable(string[index + 1:], symbol):
yield string[:index] + rest
yield string[: index + 1] + rest
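# Example (illustrative): inlining the nullable symbol 'B' in ('A', 'B', 'C')
# yields the string both without and with the nullable occurrence:
#   list(_inline_nullable(('A', 'B', 'C'), 'B')) == [('A', 'C'), ('A', 'B', 'C')]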
def _chomsky_normalize_del(rules):
nullables = set()
new_nullables = True
while new_nullables:
new_nullables = False
for source, targets in rules.items():
if source in nullables:
continue
for target in targets:
nullable = True
for symbol in target:
if symbol not in nullables:
nullable = False
break
if nullable:
nullables.add(source)
new_nullables = True
break
new_rules = _copy_rules(rules)
for source, targets in rules.items():
for target in targets:
for nullable in set(target) & nullables:
for new_target in _inline_nullable(target, nullable):
new_rules[source].add(new_target)
for source in nullables:
new_rules[source].discard(())
return new_rules
def _chomsky_normalize_unit_for_symbol(rules, source, seen=set()):
for target in rules[source]:
if not (len(target) == 1 and target[0] in rules):
yield target
continue
for symbol in target:
if symbol in seen:
continue
yield from _chomsky_normalize_unit_for_symbol(
rules, symbol, seen | {source}
)
def _chomsky_normalize_unit(rules):
return {
source: set(_chomsky_normalize_unit_for_symbol(rules, source))
for source in rules
}
def _chomsky_normalize(rules, start):
return _chomsky_normalize_prettify(
_chomsky_normalize_unit(
_chomsky_normalize_del(
_chomsky_normalize_bin(
_chomsky_normalize_term(
_chomsky_normalize_start(
_chomsky_normalize_rename(rules), start
)
)
)
)
)
)
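# Reading the nesting above inside-out, the normalization steps run in order:
# rename -> start -> term -> bin -> del -> unit -> prettify.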
def _prettify_symbol(symbol):
symbol_type, *args = symbol
if symbol_type == "original":
return args[0]
elif symbol_type == "term":
return "T{}".format(_prettify_symbol(args[0]))
elif symbol_type == "start":
return "start"
elif symbol_type == "bin":
return tuple(map(_prettify_symbol, args[0]))
return symbol
def _chomsky_normalize_prettify(rules):
return {
_prettify_symbol(source): {
tuple(_prettify_symbol(symbol) for symbol in target)
for target in targets
}
for source, targets in rules.items()
}
def _cyk_products(rules, string):
singles = co.defaultdict(set)
pairs = co.defaultdict(set)
for source, targets in rules.items():
for target in targets:
(singles if len(target) == 1 else pairs)[source].add(
target
)
products = co.defaultdict(lambda: co.defaultdict(set))
for source, targets in singles.items():
for target in targets:
products[source][target].add(target)
for substring_length in range(2, len(string) + 1):
for position in range(len(string) - substring_length + 1):
substring = string[position: position + substring_length]
for split in range(1, substring_length):
left_string, right_string = (
substring[:split],
substring[split:],
)
for source, targets in pairs.items():
for left, right in targets:
if (
left_string in products[left]
and right_string in products[right]
):
for left_tree, right_tree in it.product(
products[left][left_string],
products[right][right_string],
):
products[source][substring].add(
(
(left, left_tree),
(right, right_tree),
)
)
return products
def _cyk(rules, string, start):
for tree in _cyk_products(rules, string)[start][string]:
yield start, tree
def _format_parse_tree_lines(tree):
node, children = tree
if len(children) == 1:
yield "{!r},".format((node, children))
return
node, (left, right) = tree
head = "({!r}, ".format(node)
left_lines = _format_parse_tree_lines(left)
yield head + next(left_lines)
for line in left_lines:
yield " " * len(head) + line
for line in _format_parse_tree_lines(right):
yield " " * len(head) + line
def _format_parse_tree(tree):
return "\n".join(_format_parse_tree_lines(tree))
def run(rules, string, start):
    # The grammar is normalized to CNF first; the start symbol is prettified to
    # match the rule names produced by _chomsky_normalize (see the commented
    # example usage below), and the input string is converted to a tuple of symbols.
    return _cyk(_chomsky_normalize(rules, start), tuple(string), _prettify_symbol(start))
#
#
# cnf = _chomsky_normalize(
# _parse_grammar(parser._parse_jff_structure(args.jff_path)),
# ("original", "S"),
# )
#
# for tree in _cyk(cnf, tuple("000#100"), "S"):
# print(_format_parse_tree(tree))
#
|
py | 1a488d039f2ddeb5bda029d161de12a6de4ea4d0 | #!/usr/bin/env python
"""
Tables dependencies in an Oracle query
"""
import lib_oracle
import lib_common
import lib_sql
from sources_types.sql import query as sql_query
from sources_types.oracle import query as oracle_query
def Main():
# cgiEnv = lib_common.CgiEnv()
cgiEnv = lib_oracle.OracleEnv()
grph = cgiEnv.GetGraph()
sqlQuery = sql_query.GetEnvArgs(cgiEnv)
dbNam = cgiEnv.m_entity_id_dict["Db"]
# This is simply the user.
oraSchema = cgiEnv.OracleSchema()
nodeSqlQuery = oracle_query.MakeUri(sqlQuery,dbNam)
propSheetToQuery = lib_common.MakeProp("Table dependency")
list_of_table_names = lib_sql.TableDependencies(sqlQuery)
list_of_nodes = oracle_query.QueryToNodesList(sqlQuery,{"Db":dbNam },list_of_table_names,oraSchema)
for nodTab in list_of_nodes:
grph.add( ( nodeSqlQuery, propSheetToQuery, nodTab ) )
cgiEnv.OutCgiRdf()
if __name__ == '__main__':
Main()
|
py | 1a488d238f6ed2e6dfa04e857d509ae4ebb08e5c | #!/usr/bin/env python
import sys
from gribapi import *
from array import array
import random
import traceback
import itertools
VERBOSE=1
WRITE=0
class Usage(Exception):
def __init__(self):
pass
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def test():
# test new from sample
#grib_release(grib_new_from_samples("GRIB2"))
if len(sys.argv) < 2:
raise Usage
infile = sys.argv[1]
index_keys = ["shortName","level","number","step"]
print "indexing..."
iid = grib_index_new_from_file(infile,index_keys)
print "end indexing..."
index_vals = []
for key in index_keys:
print "%sSize=%d" % (
key,
grib_index_get_size(iid,key)
)
key_vals = grib_index_get_string(iid,key)
print " ".join(key_vals)
index_vals.append(key_vals)
for prod in product(*index_vals):
for i in range(len(index_keys)):
grib_index_select_string(iid,index_keys[i],str(prod[i]))
while 1:
gid = grib_new_from_index(iid)
if gid is None: break
print " ".join(["%s=%s" % (key,grib_get_string(gid,key)) for key in index_keys])
grib_release(gid)
grib_index_release(iid)
def main():
try:
test()
except GribInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
print >>sys.stderr,err.msg
return 1
except Usage:
print "Usage: %s infile" % sys.argv[0]
sys.exit(2)
if __name__ == "__main__":
main()
#print "------------------------------------"
|
py | 1a488e24b4e89748d3fe2c34992a605943780a62 | from image_cache.cache import *
from image_cache import itertools
|
py | 1a488eeb40c55244491eb0d0fc4466ff40558bd8 | from decimal import Decimal
from typing import Optional, Dict, List
from django import forms
from django.utils.translation import gettext as _
from rest_framework.request import Request
from polaris.integrations import WithdrawalIntegration, calculate_fee
from polaris.models import Transaction, Asset
from polaris.sep10.token import SEP10Token
from polaris.templates import Template
from polaris.utils import getLogger
from ..forms import WithdrawForm
from .sep24_kyc import SEP24KYC
from ..models import PolarisStellarAccount, PolarisUserTransaction
logger = getLogger(__name__)
class MyWithdrawalIntegration(WithdrawalIntegration):
def form_for_transaction(
self,
request: Request,
transaction: Transaction,
post_data=None,
amount=None,
*args,
**kwargs,
) -> Optional[forms.Form]:
kyc_form, content = SEP24KYC.check_kyc(transaction, post_data)
if kyc_form:
return kyc_form
elif content or transaction.amount_in:
return None
elif post_data:
return WithdrawForm(transaction, post_data)
else:
return WithdrawForm(transaction, initial={"amount": amount})
def content_for_template(
self,
request: Request,
template: Template,
form: Optional[forms.Form] = None,
transaction: Optional[Transaction] = None,
*args,
**kwargs,
) -> Optional[Dict]:
na, content = SEP24KYC.check_kyc(transaction)
if content:
return content
elif template == Template.WITHDRAW:
if not form:
return None
return {
"title": _("Polaris Transaction Information"),
"icon_label": _("Stellar Development Foundation"),
"guidance": (
_(
"Please enter the banking details for the account "
"you would like to receive your funds."
)
),
}
else: # template == Template.MORE_INFO
return {
"title": _("Polaris Transaction Information"),
"icon_label": _("Stellar Development Foundation"),
}
def after_form_validation(
self,
request: Request,
form: forms.Form,
transaction: Transaction,
*args,
**kwargs,
):
try:
SEP24KYC.track_user_activity(form, transaction)
except RuntimeError:
# Since no polaris account exists for this transaction, KYCForm
# will be returned from the next form_for_transaction() call
logger.exception(
f"KYCForm was not served first for unknown account, id: "
f"{transaction.stellar_account}"
)
def process_sep6_request(
self,
token: SEP10Token,
request: Request,
params: Dict,
transaction: Transaction,
*args,
**kwargs,
) -> Dict:
account = (
PolarisStellarAccount.objects.filter(
account=params["account"],
memo=params["memo"],
memo_type=params["memo_type"],
)
.select_related("user")
.first()
)
if not account:
return {
"type": "non_interactive_customer_info_needed",
"fields": [
"first_name",
"last_name",
"email_address",
"bank_number",
"bank_account_number",
],
}
elif not (account.user.bank_account_number and account.user.bank_number):
return {
"type": "non_interactive_customer_info_needed",
"fields": ["bank_number", "bank_account_number"],
}
elif params["type"] != "bank_account":
raise ValueError(_("'type' must be 'bank_account'"))
elif not params["dest"]:
raise ValueError(_("'dest' is required"))
elif not params["dest_extra"]:
raise ValueError(_("'dest_extra' is required"))
elif not account.confirmed:
# Here is where you would normally return something like this:
# {
# "type": "customer_info_status",
# "status": "pending"
# }
# However, we're not going to block the client from completing
# the flow since this is a reference server.
pass
asset = params["asset"]
min_amount = round(asset.withdrawal_min_amount, asset.significant_decimals)
max_amount = round(asset.withdrawal_max_amount, asset.significant_decimals)
if params["amount"]:
if not (min_amount <= params["amount"] <= max_amount):
raise ValueError(_("invalid 'amount'"))
transaction.amount_in = params["amount"]
transaction.amount_fee = calculate_fee(
{
"amount": params["amount"],
"operation": "withdraw",
"asset_code": asset.code,
}
)
transaction.amount_out = round(
transaction.amount_in - transaction.amount_fee,
asset.significant_decimals,
)
transaction.save()
response = {
"account_id": asset.distribution_account,
"min_amount": min_amount,
"max_amount": max_amount,
"fee_fixed": round(asset.withdrawal_fee_fixed, asset.significant_decimals),
"fee_percent": asset.withdrawal_fee_percent,
}
if params["memo_type"] and params["memo"]:
response["memo_type"] = params["memo_type"]
response["memo"] = params["memo"]
PolarisUserTransaction.objects.create(
transaction_id=transaction.id, user=account.user, account=account
)
return response
def interactive_url(
self,
request: Request,
transaction: Transaction,
asset: Asset,
amount: Optional[Decimal],
callback: Optional[str],
*args: List,
**kwargs: Dict,
) -> Optional[str]:
raise NotImplementedError()
def save_sep9_fields(
self,
token: SEP10Token,
request: Request,
stellar_account: str,
fields: Dict,
language_code: str,
muxed_account: Optional[str] = None,
account_memo: Optional[str] = None,
account_memo_type: Optional[str] = None,
*args: List,
**kwargs: Dict,
):
raise NotImplementedError()
def patch_transaction(
self,
token: SEP10Token,
request: Request,
params: Dict,
transaction: Transaction,
*args: List,
**kwargs: Dict,
):
raise NotImplementedError()
|
py | 1a489011edb23adbfd4e6d1d40d4b3b90637b159 | import csv
import os.path
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
matplotlib.rcParams.update({'font.size': 15})
n_trial = 5
top_k = 1
batch_size = 4000
max_step = np.inf
max_reward = np.inf
min_reward = -np.inf
exp_name = 'CartpoleNd'
exp_param = 'D1K05A05Ec10'
extra_name = ''
prepath = "../" + exp_name + "/Data/AST/Lexington/" + exp_param
plot_path = "../" + exp_name + "/Data/Plot/top" + str(top_k) + "/"
policies = [
# "TRPO",\
"MCTSRS",\
# "MCTSAS",\
# "MCTSBV",\
# "GAP100T20K3Step1.0Fmean","GASMP100T20K3Step1.0Fmean",\
# "GAP500T20K3Step1.0Fmean","GASMP500T20K3Step1.0Fmean",\
]
plot_name = exp_name + '_' + exp_param + 'avgtop' + str(top_k) + 'trial' + str(n_trial) + extra_name
plts = []
legends = []
fig = plt.figure(figsize=(10, 10))
for (policy_index, policy) in enumerate(policies):
print(policy)
for trial in range(n_trial):
file_path = prepath + '/' + policy + '/' + str(trial) + '/process.csv'
if os.path.exists(file_path):
print(trial)
steps = []
rewards = []
with open(file_path) as csv_file:
if '\0' in open(file_path).read():
print("you have null bytes in your input file")
csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
else:
csv_reader = csv.reader(csv_file, delimiter=',')
for (i, row) in enumerate(csv_reader):
if i == 0:
entry_dict = {}
for index in range(len(row)):
entry_dict[row[index]] = index
else:
# print(row[entry_dict["StepNum"]])
if int(row[entry_dict["StepNum"]]) > max_step:
break
if int(row[entry_dict["StepNum"]]) % batch_size == 0:
steps.append(int(row[entry_dict["StepNum"]]))
avg_top = 0.0
for k in range(top_k):
avg_top += np.clip(float(row[entry_dict["reward " + str(k)]]), min_reward, max_reward)
avg_top /= top_k
rewards.append(avg_top)
plot, = plt.plot(steps, rewards)
# plot, = plt.plot(steps,np.mean(np.exp(Rewards),0))
# plot,_,_ = plt.errorbar(steps,np.mean(Rewards,0),yerr=np.std(Rewards,0)/np.sqrt(n_trial),errorevery=10)
plts.append(plot)
legends.append(policy + '_' + str(trial))
plt.legend(plts, legends)
plt.xlabel('Step Number')
plt.ylabel('Top ' + str(top_k) + ' Reward')
fig.savefig(plot_path + plot_name)
plt.close(fig)
|
py | 1a48910bd4cf0a094a15231f2a2a3b67fb6ec43f | import os
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import GradScaler, autocast
from scripts.focalloss import FocalLoss
from Transformers_VQA.dataset_final import make_final_loader
from Transformers_VQA.modified_uniter_attnbias_rcnn_SBERT_graph import Modified_Uniter_attnbias_rcnn_SBERT_graph
def train():
# Constant setup
BATCH_SIZE = 3
BATCH_SIZE_DEV = 1
LR = 5e-6
N_EPOCH = 30
GAMMA = 2
ALPHA = 5
print(f'UNITERonCLIPBERT_attnbias_rcnn_SBERT_graph batch_size={BATCH_SIZE}, Adam_lr={LR}, FocalAlpha={ALPHA}, GAMMA={GAMMA}')
torch.manual_seed(21)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Make loaders
train_loader = make_final_loader('train', BATCH_SIZE, rcnn=True)
dev_loader = make_final_loader('dev', BATCH_SIZE_DEV, rcnn=True)
# Setup Tensorboard
writer = SummaryWriter(comment = f'UNITERonCLIPBERT_attnbias_rcnn_SBERT_graph batch_size={BATCH_SIZE}, Adam_lr={LR}, FocalAlpha={ALPHA}, GAMMA={GAMMA}')
# multiply by layer, do an embedding for indices 1 to 12
mask_stepper = torch.ones(1, 12, 512, 512).to(device)
for i in range(12):
mask_stepper[0, i, :, :] *= i+1
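    # After this loop, mask_stepper[0, i, :, :] == i + 1 for i = 0..11, so multiplying
    # a binary relation mask by mask_stepper repeats it across a layer dimension and
    # scales it by the 1-based layer index (presumably consumed as an embedding index
    # inside the model, per the comment above).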
# Eval for F1
def eval(model):
model.eval()
with torch.no_grad():
total_hit, total_pred_positive, total_truth_positive, total_loss, total_pred = 0, 0, 0, [], 0
for idx, batch in enumerate(dev_loader):
input_ids = batch['input_ids'].to(device)
txt_seg_ids = batch['txt_seg_ids'].to(device)
vis_feats = batch['vis_feats'].to(device)
obj_embs = batch['obj_embs_SBERT'].to(device)
obj_ids = batch['obj_ids'].to(device)
pos_x = batch['pos_x'].to(device)
pos_y = batch['pos_y'].to(device)
pos_z = batch['pos_z'].to(device)
bboxes = batch['bboxes'].to(device)
vis_seg = batch['vis_seg'].to(device)
extended_attention_mask = batch['extended_attention_mask'].to(device)
output_mask = batch['output_mask'].to(device)
reference = batch['reference'].to(device)
scene_seg = batch['scene_segs'].to(device)
rel_mask_left = batch['rel_mask_left'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_right = batch['rel_mask_right'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_up = batch['rel_mask_up'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_down = batch['rel_mask_down'].to(device).unsqueeze(0).unsqueeze(2)
rel_masks = torch.cat((rel_mask_left, rel_mask_right, rel_mask_up, rel_mask_down), axis=0)
rel_masks = rel_masks * mask_stepper
pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask, scene_seg, rel_masks)
pred = pred.reshape(1,-1)
pred = pred[output_mask==1].reshape(-1,1)
truth = reference.float().reshape(-1,1)
loss = criterion(pred, truth).detach()
pred_bin = pred > 0
truth_bin = truth > 0.5
hit = torch.sum(pred_bin*truth_bin == 1).detach()
pred_positive = torch.sum(pred > 0).detach()
truth_positive = torch.sum(truth > 0.5).detach()
total_loss.append(float(loss))
total_hit += int(hit)
total_pred_positive += int(pred_positive)
total_truth_positive += int(truth_positive)
total_pred += int(pred.shape[0])
print('#pred positives',total_pred_positive)
print('#groundtruth positives',total_truth_positive)
print('#total pred', total_pred)
print('#hit', total_hit)
total_loss = sum(total_loss)/len(total_loss)
if (total_pred_positive == 0):
total_pred_positive = 1e10
prec = total_hit / total_pred_positive
recall = total_hit / total_truth_positive
try:
f1 = 2/(1/prec + 1/recall)
except:
f1 = 0
return total_loss, prec, recall, f1
# Training setup
model = Modified_Uniter_attnbias_rcnn_SBERT_graph().to(device)
criterion = FocalLoss(gamma=GAMMA, alpha=ALPHA)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
scaler = GradScaler()
# Train
n_iter = 0
n_prev_iter = 0
running_loss = 0
for epoch in range(N_EPOCH):
for batch_idx, batch in enumerate(train_loader):
model.train()
optimizer.zero_grad()
input_ids = batch['input_ids'].to(device)
txt_seg_ids = batch['txt_seg_ids'].to(device)
vis_feats = batch['vis_feats'].to(device)
obj_embs = batch['obj_embs_SBERT'].to(device)
obj_ids = batch['obj_ids'].to(device)
pos_x = batch['pos_x'].to(device)
pos_y = batch['pos_y'].to(device)
pos_z = batch['pos_z'].to(device)
bboxes = batch['bboxes'].to(device)
vis_seg = batch['vis_seg'].to(device)
extended_attention_mask = batch['extended_attention_mask'].to(device)
output_mask = batch['output_mask'].to(device)
reference = batch['reference'].to(device)
scene_seg = batch['scene_segs'].to(device)
rel_mask_left = batch['rel_mask_left'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_right = batch['rel_mask_right'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_up = batch['rel_mask_up'].to(device).unsqueeze(0).unsqueeze(2)
rel_mask_down = batch['rel_mask_down'].to(device).unsqueeze(0).unsqueeze(2)
rel_masks = torch.cat((rel_mask_left, rel_mask_right, rel_mask_up, rel_mask_down), axis=0)
rel_masks = rel_masks * mask_stepper
truth = reference.float().reshape(-1,1)
# To fix: NaN under mixed precision
# with autocast():
# pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask)
# pred = pred.reshape(1,-1)
# pred = pred[output_mask==1].reshape(-1,1)
# loss = criterion(pred, truth)
# scaler.scale(loss).backward()
# scaler.step(optimizer)
# scaler.update()
pred = model(input_ids , txt_seg_ids, vis_feats, obj_embs, obj_ids, pos_x, pos_y, pos_z, bboxes, vis_seg, extended_attention_mask, scene_seg, rel_masks)
pred = pred.reshape(1,-1)
pred = pred[output_mask==1].reshape(-1,1)
loss = criterion(pred, truth)
loss.backward()
optimizer.step()
n_iter += 1
writer.add_scalar('Loss/train_batch', loss, n_iter)
running_loss += loss.detach()
if batch_idx % 2000 == 0:
print(pred.reshape(-1))
print(truth.reshape(-1))
print(running_loss/(n_iter-n_prev_iter))
loss, prec, recall, f1 = eval(model)
writer.add_scalar('Loss/train_avg', running_loss/(n_iter-n_prev_iter), n_iter)
n_prev_iter = n_iter
running_loss = 0
writer.add_scalar('Loss/dev', loss, n_iter)
writer.add_scalar('Precision/dev', prec, n_iter)
writer.add_scalar('Recall/dev', recall, n_iter)
writer.add_scalar('F1/dev', f1, n_iter)
try:
os.makedirs(f'./checkpoint/UNITERonCLIPBERT_attnbiasRcnn_SBERT_graph_n_batchsize{BATCH_SIZE}_lr{LR}_FocalALPHA{ALPHA}_GAMMA{GAMMA}')
except:
pass
torch.save({
'epoch': epoch,
'step': n_iter,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'dev_loss': loss,
}, f'./checkpoint/UNITERonCLIPBERT_attnbiasRcnn_SBERT_graph_n_batchsize{BATCH_SIZE}_lr{LR}_FocalALPHA{ALPHA}_GAMMA{GAMMA}/{epoch}_{batch_idx}_{loss}_{f1}.bin')
print('DONE !!!')
if __name__ == '__main__':
train() |
py | 1a48919c1532f52aacaf4cbef550548de7d12126 | #!/usr/bin/env python
# coding: utf-8
# # Star Type
#
# Author: Yufei Ren
#
# Course Project, UC Irvine, Math 10, W22
# ## Introduction
#
# The dataset "Star dataset to predict star types" consists several features of planets in 6 category: Brown Dwarf, Red Dwarf, White Dwarf, Main Sequence , SuperGiants, HyperGiants, and they are respectivley assigned with numbers 0, 1, 2, 3, 4, 5.
# >
# In this project, the temperature, radius, 'Absolute magnitude(Mv)', and luminosity are first used to predict the star type. After that, sklearn is used to find the relationship between temperature, radius and luminosity.
# ## Main portion of the project
#
# (You can either have all one section or divide into multiple sections)
# In[ ]:
import numpy as np
import pandas as pd
import seaborn as sns
import altair as alt
# In[ ]:
df = pd.read_csv("/work/6 class csv.csv")
df = df.dropna(axis=1) # clear the data
df.head()
# In[ ]:
df.describe()
# In[ ]:
df.columns
# Altair charts are used to visualize the dataset before making predictions.
# In[ ]:
brush = alt.selection_interval()
c1 = alt.Chart(df).mark_point().encode(
x='Absolute magnitude(Mv)',
y='Radius(R/Ro):Q',
color='Star type:N'
).add_selection(brush)
c2= alt.Chart(df).mark_bar().encode(
x = 'Star type:N',
y='Absolute magnitude(Mv)'
).transform_filter(brush)
c1|c2
# ## Predict the Star type
# Firstly, KNeighborsClassifier is used to predict the star type
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
# In[ ]:
X = df.iloc[:,:4]
y = df["Star type"]
# Before using the K-Nearest Neighbors Classifier, a scaler is used to standardize the input data so that features on different scales do not dominate the distance computation.
# In[ ]:
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
# In[ ]:
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2)
# In[ ]:
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
loss_train = log_loss(y_train, clf.predict_proba(X_train))
loss_test = log_loss(y_test, clf.predict_proba(X_test))
# In[ ]:
print(f"The log_loss of X_train and y_train is {loss_train:.2f}")
print(f"The log_loss of X_test and y_test is {loss_test:.2f}")
# In[ ]:
df['predict_K'] = clf.predict(X_scaled)
# The log loss on the test data is not large, so there is no sign of overfitting.
# In[ ]:
(df["Star type"] == df["predict_K"]).value_counts()
# Here we can see that the predicted labels are very close to the real labels, and there is no sign of over-fitting.
# ## Predict the Luminosity
# After using K-Nearest Neighbors to predict the type of a star, I am interested in finding how radius and temperature are related to the luminosity of a star.
# I first try LinearRegression.
# In[ ]:
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
X2 = df[['Radius(R/Ro)','Temperature (K)']]
y2 = df['Luminosity(L/Lo)']
reg1 = LinearRegression()
reg1.fit(X2,y2)
MSE1 = mean_squared_error(y2,reg1.predict(X2))
MAE1 = mean_absolute_error(y2,reg1.predict(X2))
print(f"the coefficients of reg are {reg1.coef_}")
print(f"the intersept of reg is {reg1.intercept_}.")
print(f'The Mean square error is {MSE1:.3f}')
print(f'The Mean absolute error is {MAE1:.3f}')
# The MSE is too high in this case, so I choose to try the KNeighborsRegressor; again, the input should be scaled first because the features are not in the same units.
# In[ ]:
from sklearn.neighbors import KNeighborsRegressor
scaler = StandardScaler()
scaler.fit(X2)
X2_scaled = scaler.transform(X2)
reg2 = KNeighborsRegressor(n_neighbors=4)
# In[ ]:
reg2.fit(X2_scaled, y2)
df['predict_l'] = reg2.predict(X2_scaled)
MSE2 = mean_squared_error(reg2.predict(X2_scaled),y2)
MAE2 = mean_absolute_error(reg2.predict(X2_scaled),y2)
print(f'The Mean square error is {MSE2:.3f}')
print(f'The Mean absolute error is {MAE2:.3f}')
# The error is still large, but smaller than the error obtained with linear regression. The reason might be that the relationship is not linear but polynomial.
#
# To check whether a polynomial relationship fits better, PolynomialFeatures is used.
# In[ ]:
df3 = df.iloc[:,:3]
df3.columns
# In[ ]:
y_ply = df['Luminosity(L/Lo)']
X_ply = df[['Temperature (K)', 'Radius(R/Ro)']]
# In[ ]:
from sklearn.preprocessing import PolynomialFeatures
# Here I first create a dataframe that contains all polynomial combinations of temperature and radius up to degree 9.
# In[ ]:
poly = PolynomialFeatures(degree=9)
df_ply = pd.DataFrame(poly.fit_transform(X_ply))
df_ply.columns = poly.get_feature_names_out()
# In[ ]:
df_ply
# Then I apply linear regression of luminosity on each polynomial combination and calculate the error. In the end, I print out the smallest error and its combination.
# In[ ]:
error_dict = {}
for column in df_ply:
reg = LinearRegression()
reg.fit(df_ply[[column]], y_ply)
error = mean_squared_error(reg.predict(df_ply[[column]]), y_ply)
error_dict[error] = column
print("the smallest mean squared error is", min(error_dict), 'from column', error_dict[min(error_dict)])
# Here we can see the lowest mean squared error is around 2.3 * 10^10, and the corresponding combination is Radius^1 * Temperature^0.
#
# The error is very large; a possible reason is that all star types are evaluated together while their luminosity ranges are on very different scales. As a result, the star types are evaluated separately below.
# In[ ]:
alt.Chart(df).mark_boxplot(extent='min-max').encode(
x='Star type:N',
y='Luminosity(L/Lo):Q'
)
# In the box plot above, it is apparent that the luminosity ranges of the different star types are on very different scales.
# In[ ]:
def find_combination(star_type):
df_star = df[df['Star type'] == star_type].iloc[:,:3]
X = df_star[['Temperature (K)', 'Radius(R/Ro)']]
y = df_star['Luminosity(L/Lo)']
poly = PolynomialFeatures(degree=9)
df_ply = pd.DataFrame(poly.fit_transform(X))
df_ply.columns = poly.get_feature_names_out()
error_dict = {}
for column in df_ply:
reg = LinearRegression()
reg.fit(df_ply[[column]], y)
error = mean_squared_error(reg.predict(df_ply[[column]]), y)
error_dict[error] = column
print(f"For the star type {star_type}, the smallest error is {min(error_dict)}, which is generagted form {error_dict[min(error_dict)]}")
# In[ ]:
for i in range(5):
find_combination(i)
# After applying PolynomialFeatures to each star type separately, the mean squared error is clearly reduced. However, each star type reaches its lowest error with a different polynomial combination. As a result, it is not safe to claim that any single polynomial combination of temperature and radius is the best for predicting the luminosity.
# ## Summary
#
# In this project, I am able to predict a star's type using KNeighborsClassifier with comparatively high accuracy. However, a single best polynomial combination of temperature and radius for predicting the luminosity is not found, because the best structure differs between star types. As a result, a larger dataset is needed to get a more accurate result.
# ## References
# The dataset “6 class csv.csv” was adapted from [Star dataset to predict star types](https://www.kaggle.com/deepu1109/star-dataset)
# >
# The mthods and application of polynomialfeature was adapted from [sklearn.preprocessing.PolynomialFeatures](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html)
# >
# The idea of polynomialfeature is adapted from [Introduction to Polynomial Regression (with Python Implementation)](https://www.analyticsvidhya.com/blog/2020/03/polynomial-regression-python/)
# >
# The code of drawing altair histogram is adapted from [Simple Histogram](https://altair-viz.github.io/gallery/simple_histogram.html)
# >
# The code of drawing boxplot is adapted from [Boxplot with Min/Max Whiskers](https://altair-viz.github.io/gallery/boxplot.html#)
# <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=0fb54ba1-bdfd-468e-b41a-ed6482907af2' target="_blank">
# <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTEyMzUuMDAwMDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIyMjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTkwNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQxNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1IDkuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgICAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img>
# Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
|
py | 1a4891a2169a3b02e26a71ec8938042fb7937a57 | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#757. Set Intersection Size At Least Two
#An integer interval [a, b] (for integers a < b) is a set of all consecutive integers from a to b, including a and b.
#Find the minimum size of a set S such that for every integer interval A in intervals, the intersection of S with A has size at least 2.
#Example 1:
#Input: intervals = [[1, 3], [1, 4], [2, 5], [3, 5]]
#Output: 3
#Explanation:
#Consider the set S = {2, 3, 4}. For each interval, there are at least 2 elements from S in the interval.
#Also, there isn't a smaller size set that fulfills the above condition.
#Thus, we output the size of this set, which is 3.
#Example 2:
#Input: intervals = [[1, 2], [2, 3], [2, 4], [4, 5]]
#Output: 5
#Explanation:
#An example of a minimum sized set is {1, 2, 3, 4, 5}.
#Note:
#intervals will have length in range [1, 3000].
#intervals[i] will have length 2, representing some integer interval.
#intervals[i][j] will be an integer in [0, 10^8].
#class Solution:
# def intersectionSizeTwo(self, intervals):
# """
# :type intervals: List[List[int]]
# :rtype: int
# """
# Time Is Money |
py | 1a4891fe93c7020921cd7dc9982284148cd6fb2f | import csv
import flask
import operator
import sqlite3
app = flask.Flask(__name__)
@app.route('/api/ships/')
def ships():
cursor = get_db()
result = []
for ship in cursor.execute('select * from Ships'):
result.append({'name': str(ship[0]), 'imo': str(ship[1])})
return flask.jsonify(result)
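# Example response shape (illustrative, based on the sample data in fill_db below):
#   [{"name": "Mathilde Maersk", "imo": "9632179"}, ...]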
@app.route('/api/positions/<imo>/')
def positions(imo):
cursor = get_db()
result = []
for p in cursor.execute('select * from Positions where imo = "%s"' % imo):
result.append({
'timestamp': p[1],
'latitude': p[2],
'longitude': p[3]
})
if not result:
flask.abort(404)
return flask.jsonify(sorted(result, key=operator.itemgetter('timestamp'),
reverse=True))
def _build_db():
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('''create table Ships (name text, imo integer)''')
c.execute('''create table Positions (
imo integer,
timestamp text,
latitude real,
longitude real)''')
conn.commit()
return conn
def fill_db(positions_file):
conn = _build_db()
cursor = conn.cursor()
ships = [('Mathilde Maersk', 9632179),
('Australian Spirit', 9247455),
('MSC Preziosa', 9595321)]
for ship in ships:
cursor.execute('insert into Ships values ("%s", %s)' % ship)
for row in csv.reader(positions_file):
cursor.execute('''insert into Positions values (%s, "%s", %s, %s)''' %
tuple(row))
conn.commit()
return cursor
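# Assumed positions.csv layout (one row per position fix, matching the insert
# statement in fill_db above): imo,timestamp,latitude,longitude, for example
#   9632179,2020-01-01T12:00:00Z,55.676,12.568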
def get_db():
if 'db' not in flask.g:
cursor = fill_db(open('positions.csv'))
flask.g.db = cursor
return flask.g.db
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
|
py | 1a48922546748ca4703d74b30892d0ef21489e21 | import subprocess
import tempfile
import os
clone_dir = os.path.join(tempfile.gettempdir(), 'scikit-beam-examples')
try:
ret = subprocess.check_output(
['git', 'clone', 'https://github.com/scikit-beam/scikit-beam-examples',
clone_dir])
except subprocess.CalledProcessError:
print("scikit-beam-examples already exists at %s" % (clone_dir))
print("resetting to the master branch")
subprocess.Popen(['git', 'remote', 'update'], cwd=clone_dir)
subprocess.Popen(['git', 'reset', '--hard', 'origin/master'],
cwd=clone_dir)
|
py | 1a48924c7af548708bae325ebec05115d7b873ff | def unsafeHas(label):
return lambda rec: label in rec
def unsafeGet(label):
return lambda rec: rec[label]
def unsafeSet(label):
return lambda val: lambda rec: {**rec, label: val}
def unsafeDelete(label):
def ap(rec):
copy = rec.copy()
del copy[label]
return copy
return ap
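# Usage sketch (illustrative only): each helper is curried, taking the label
# first and returning a function over the record (a plain dict).
#   rec = {'name': 'Ada', 'age': 36}
#   unsafeHas('name')(rec)      # True
#   unsafeGet('age')(rec)       # 36
#   unsafeSet('age')(37)(rec)   # {'name': 'Ada', 'age': 37}
#   unsafeDelete('age')(rec)    # {'name': 'Ada'}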
|
py | 1a489265d87a3ef43f282443dde6ab1ac5666928 | import argparse
import os
import torchvision.transforms as transforms
from src.datamanager import *
from src.datamanager import DataProvider
import src.datamanager.utils as datautils
from PIL import Image
from src.configs import *
from src.ml.net import PyNet
from src.results import performance
from src.results.reid import ReIDPerformance
import torchvision.transforms.functional as F
from src.ml.net.pt import factory as model_factory
from operator import itemgetter
from src.visualization import visualizer
import src.pyrnet.model as reid_model
import src.pyrnet.features as features
import src.pyrnet.metric as metric
# Arg parser
parser = argparse.ArgumentParser(description='ReID Net')
parser.add_argument('--dataset', default='Market-1501', type=str, metavar='STR', help='dataset name (default: Market-1501)')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N', help='number of data loading workers (default: 10)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--print-freq', '--p', default=20, type=int, metavar='N', help='print frequency (default: 20)')
parser.add_argument('--net', default='densenet', type=str, metavar='STR', help='network model (default: densenet)')
parser.add_argument('--depth', default=201, type=int, metavar='N', help='network model depth (default: 201)')
parser.add_argument('--bottleneck-size', default=512, type=int, metavar='N', help='classifier bottleneck size (default: 512)')
parser.add_argument('--pyr-feature-size', default=256, type=int, metavar='N', help='pyramidal maps (default: 256)')
parser.add_argument('--pyr-feature-size-dynamic', default=True, type=bool, metavar='B', help='pyramidal feature size dependent on detail level (default: True)')
parser.add_argument('--pyr-operator', default='max_pool', type=str, metavar='STR', help='pyramidal operator (default: max_pool)')
parser.add_argument('--pyr-levels', default=-1, type=int, metavar='N', help='pyramidal levels (default: -1 => dynamic)')
parser.add_argument('--metric', default='euclidean', type=str, metavar='STR', help='metric (default: euclidean')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='filename of latest checkpoint (default: empty => latest experiment)')
parser.add_argument('--epoch', default=100, type=int, metavar='N', help='evaluation epoch, used only if --checkpoint is not set (default: 100)')
parser.add_argument('--rerank', default=False, type=bool, metavar='B', help='enable re-ranking (default: False)')
def get_args():
return parser.parse_args()
""" ================================================================================================================
EVALUATION
============================================================================================================ """
def evaluate(args, net=None, dset_train=None, dset_test=None,
display_ranking_image_index=(0, 2, 10, 40, 60, 100, 120, 140, 160, 180, 200),
layer_embeddings=('emb\\bottleneck1', 'emb\\bottleneck2', 'emb\\bottleneck3', 'emb\\bottleneck4'),
sample_size=(384, 192)):
# Just check the parsed arguments
print(vars(args))
""" ----------------------------------------------------------------------------------------------------------------
DATA
------------------------------------------------------------------------------------------------------------ """
# Imagenet Normalization
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# Data transformations
transformations = DataTransformer([
transforms.Resize(sample_size, interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize
])
transformations_flipped = DataTransformer([
transforms.Resize(sample_size, interpolation=Image.BICUBIC),
transforms.Lambda(lambda x: F.hflip(x)),
transforms.ToTensor(),
normalize])
# Dataset
if dset_train is None or dset_test is None:
dset_opts = DatasetConfig(args.dataset, None, (0.5, 0.5), cam_pair=(-1, -1))
dset = DatasetReID(dset_opts.name, os.path.join('data', dset_opts.name),
im_size=dset_opts.imsize, in_memory=False, keep_aspect_ratio=True)
# Splits
dset_train, dset_test = dset.split(dset_opts.split, save_load=True, make_each_split_contiguous=True)
# Data provider
data_provider = DataProvider(dset_test, loader=datautils.load_image, transform=transformations)
num_classes = len(dset_train.classes)
# Data provider flipped
data_provider_flipped = DataProvider(dset_test, loader=datautils.load_image, transform=transformations_flipped)
""" ----------------------------------------------------------------------------------------------------------------
MODEL
------------------------------------------------------------------------------------------------------------ """
if net is None:
# From which checkpoint do we need to load the model?
checkpoint = args.checkpoint
if checkpoint == '':
folder = os.path.join('data', 'experiments', args.dataset, os.listdir(os.path.join('data', 'experiments', args.dataset))[-1])
checkpoint = os.path.join(folder, 'checkpoint_epoch-{}.pth.tar'.format(args.epoch))
folder = os.path.dirname(checkpoint)
# Get model (load it from checkpoint!)
model = reid_model.get_model(args.net, args.depth,
data_provider[0][0].size(), num_classes,
bottleneck_size=args.bottleneck_size,
pyr_feature_size=args.pyr_feature_size,
pyr_operator=args.pyr_operator, pyr_feature_size_dynamic=args.pyr_feature_size_dynamic,
checkpoint_path=checkpoint)
# Make it parallel..
model = model_factory.make_it_parallel(model, 'multigpu')
# Net initialization
net = PyNet()
net.model = model
net.exp_folder = folder
# Move to GPU (if available)
net.to_gpu()
""" ----------------------------------------------------------------------------------------------------------------
FEATURES
------------------------------------------------------------------------------------------------------------ """
X_norm = []
data_providers = [data_provider, data_provider_flipped]
# Get features from the data providers
for ii, dp in enumerate(data_providers):
X_norm_new = features.get_features(net, [dp], layer_embeddings=layer_embeddings, batch_size=args.batch_size, workers=args.workers)
# Concat
X_norm.extend(X_norm_new)
""" ----------------------------------------------------------------------------------------------------------------
MATCH
------------------------------------------------------------------------------------------------------------ """
# Match images (re-rank if needed)
D, D_rerank, probe_info, gallery_info = metric.get_distance(dset_test, X_norm, args.metric, re_rank=args.rerank)
# Unpack matching info
probe_idx, probe_id, probe_cam = probe_info
gallery_idx, gallery_id, gallery_cam = gallery_info
""" ----------------------------------------------------------------------------------------------------------------
PERFORMANCE
------------------------------------------------------------------------------------------------------------ """
# CMC
reid_perf = ReIDPerformance()
    reid_perf.compute(-D, probe_idx, gallery_idx, probe_id, gallery_id, probe_cam=probe_cam, gallery_cam=gallery_cam)
data_to_print = [reid_perf.cmc[0], reid_perf.cmc[4], reid_perf.cmc[9], reid_perf.cmc[19], reid_perf.cmc[49], reid_perf.nauc, reid_perf.ap.mean()*100]
res_string = 'CMC [1-5-10-20-50]: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -- nAUC: {:.2f} -- mAP: {:.2f}'.format(*data_to_print)
print(res_string)
# CMC plot
visualizer.plot_cmc(reid_perf.cmc, legend='Rank-1: {:.2f} - mAP: {:.2f}'.format(reid_perf.cmc[0], reid_perf.ap.mean()*100), title=str(layer_embeddings), render_on_screen=True)
reid_perf_rerank = ReIDPerformance()
if D_rerank is not None:
# CMC with rerank
        reid_perf_rerank.compute(-D_rerank, probe_idx, gallery_idx, probe_id, gallery_id, probe_cam=probe_cam, gallery_cam=gallery_cam)
data_to_print = [reid_perf_rerank.cmc[0], reid_perf_rerank.cmc[4], reid_perf_rerank.cmc[9], reid_perf_rerank.cmc[19], reid_perf_rerank.cmc[49], reid_perf_rerank.nauc, reid_perf_rerank.ap.mean()*100]
res_string = 'Re-Rank => CMC [1-5-10-20-50]: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -- nAUC: {:.2f} -- mAP: {:.2f}'.format(*data_to_print)
print(res_string)
img = visualizer.plot_cmc(reid_perf_rerank.cmc, legend='Rank-1: {:.2f} - mAP: {:.2f}'.format(reid_perf_rerank.cmc[0], reid_perf_rerank.ap.mean()*100), title=str(layer_embeddings), render_on_screen=True)
# Matching images
dp = DataProvider(dset_test, loader=datautils.load_image)
matching_images = performance.get_matching_images(dp, dp, reid_perf.matching_indexes, N=15, selected_indexes=display_ranking_image_index)
matching_ids = itemgetter(*display_ranking_image_index)(reid_perf.matching_ids)
visualizer.display_ranked_matching_images(matching_images, matching_ids=matching_ids, im_size=(256, 256), render_on_screen=True, true_match_line_width=10)
return reid_perf, reid_perf_rerank
if __name__ == '__main__':
args = get_args()
evaluate(args)
|
py | 1a489274a275248bf6d61c313c9252f37ec92b37 | from abc import ABCMeta, abstractmethod
from collections.abc import Iterable
from numbers import Integral
from typing import Callable
import operator
import warnings
from functools import reduce
import numpy as np
import scipy.sparse as ss
from ._umath import elemwise
from ._utils import _zero_of_dtype, html_table, equivalent, normalize_axis
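# Reducing n skipped copies of the fill value with np.add contributes
# np.multiply(fill_value, n), and with np.multiply it contributes
# np.power(fill_value, n); this mapping lets ``reduce`` fold the omitted fill
# values back into the result without densifying.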
_reduce_super_ufunc = {np.add: np.multiply, np.multiply: np.power}
class SparseArray:
"""
An abstract base class for all the sparse array classes.
Attributes
----------
dtype : numpy.dtype
The data type of this array.
fill_value : scalar
The fill value of this array.
"""
__metaclass__ = ABCMeta
def __init__(self, shape, fill_value=None):
if not isinstance(shape, Iterable):
shape = (shape,)
if not all(isinstance(l, Integral) and int(l) >= 0 for l in shape):
raise ValueError(
"shape must be an non-negative integer or a tuple "
"of non-negative integers."
)
self.shape = tuple(int(l) for l in shape)
if fill_value is not None:
if not hasattr(fill_value, "dtype") or fill_value.dtype != self.dtype:
self.fill_value = self.dtype.type(fill_value)
else:
self.fill_value = fill_value
else:
self.fill_value = _zero_of_dtype(self.dtype)
dtype = None
@property
@abstractmethod
def nnz(self):
"""
The number of nonzero elements in this array. Note that any duplicates in
:code:`coords` are counted multiple times. To avoid this, call :obj:`COO.sum_duplicates`.
Returns
-------
int
The number of nonzero elements in this array.
See Also
--------
DOK.nnz : Equivalent :obj:`DOK` array property.
numpy.count_nonzero : A similar Numpy function.
scipy.sparse.coo_matrix.nnz : The Scipy equivalent property.
Examples
--------
>>> import numpy as np
>>> from sparse import COO
>>> x = np.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 0])
>>> np.count_nonzero(x)
6
>>> s = COO.from_numpy(x)
>>> s.nnz
6
>>> np.count_nonzero(x) == s.nnz
True
"""
@property
def ndim(self):
"""
The number of dimensions of this array.
Returns
-------
int
The number of dimensions of this array.
See Also
--------
DOK.ndim : Equivalent property for :obj:`DOK` arrays.
numpy.ndarray.ndim : Numpy equivalent property.
Examples
--------
>>> from sparse import COO
>>> import numpy as np
>>> x = np.random.rand(1, 2, 3, 1, 2)
>>> s = COO.from_numpy(x)
>>> s.ndim
5
>>> s.ndim == x.ndim
True
"""
return len(self.shape)
@property
def size(self):
"""
The number of all elements (including zeros) in this array.
Returns
-------
int
The number of elements.
See Also
--------
numpy.ndarray.size : Numpy equivalent property.
Examples
--------
>>> from sparse import COO
>>> import numpy as np
>>> x = np.zeros((10, 10))
>>> s = COO.from_numpy(x)
>>> s.size
100
"""
# We use this instead of np.prod because np.prod
# returns a float64 for an empty shape.
return reduce(operator.mul, self.shape, 1)
@property
def density(self):
"""
The ratio of nonzero to all elements in this array.
Returns
-------
float
The ratio of nonzero to all elements.
See Also
--------
COO.size : Number of elements.
COO.nnz : Number of nonzero elements.
Examples
--------
>>> import numpy as np
>>> from sparse import COO
>>> x = np.zeros((8, 8))
>>> x[0, :] = 1
>>> s = COO.from_numpy(x)
>>> s.density
0.125
"""
return self.nnz / self.size
def _repr_html_(self):
"""
Diagnostic report about this array.
Renders in Jupyter.
"""
return html_table(self)
@abstractmethod
def asformat(self, format):
"""
Convert this sparse array to a given format.
Parameters
----------
format : str
A format string.
Returns
-------
out : SparseArray
The converted array.
Raises
------
NotImplementedError
If the format isn't supported.
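        Examples
        --------
        A minimal sketch, assuming a concrete subclass such as :obj:`COO`; the
        printed repr is illustrative and may differ between versions.
        >>> import numpy as np
        >>> import sparse
        >>> s = sparse.COO.from_numpy(np.eye(3))
        >>> s.asformat("dok")  # doctest: +SKIP
        <DOK: shape=(3, 3), dtype=float64, nnz=3, fill_value=0.0>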
"""
@abstractmethod
def todense(self):
"""
Convert this :obj:`SparseArray` array to a dense :obj:`numpy.ndarray`. Note that
this may take a large amount of memory and time.
Returns
-------
numpy.ndarray
The converted dense array.
See Also
--------
DOK.todense : Equivalent :obj:`DOK` array method.
COO.todense : Equivalent :obj:`COO` array method.
scipy.sparse.coo_matrix.todense : Equivalent Scipy method.
Examples
--------
>>> import sparse
>>> x = np.random.randint(100, size=(7, 3))
>>> s = sparse.COO.from_numpy(x)
>>> x2 = s.todense()
>>> np.array_equal(x, x2)
True
"""
def _make_shallow_copy_of(self, other):
self.__dict__ = other.__dict__.copy()
def __array__(self, *args, **kwargs):
from ._settings import AUTO_DENSIFY
if not AUTO_DENSIFY:
raise RuntimeError(
"Cannot convert a sparse array to dense automatically. "
"To manually densify, use the todense method."
)
return np.asarray(self.todense(), *args, **kwargs)
def __array_function__(self, func, types, args, kwargs):
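        # NEP 18 style dispatch: first look for a sparse counterpart of ``func``
        # inside the ``sparse`` package (mirroring the submodule path from the
        # function's ``__module__``), then fall back to an attribute of this class
        # with the same name; a non-callable attribute is returned directly when
        # called with a single positional argument and no kwargs, and anything
        # unresolved defers with ``NotImplemented``.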
import sparse as module
sparse_func = None
try:
submodules = getattr(func, "__module__", "numpy").split(".")[1:]
for submodule in submodules:
module = getattr(module, submodule)
sparse_func = getattr(module, func.__name__)
except AttributeError:
pass
else:
return sparse_func(*args, **kwargs)
try:
sparse_func = getattr(type(self), func.__name__)
except AttributeError:
pass
if (
not isinstance(sparse_func, Callable)
and len(args) == 1
and len(kwargs) == 0
):
try:
return getattr(self, func.__name__)
except AttributeError:
pass
if sparse_func is None:
return NotImplemented
return sparse_func(*args, **kwargs)
@staticmethod
def _reduce(method, *args, **kwargs):
assert len(args) == 1
self = args[0]
if isinstance(self, ss.spmatrix):
self = type(self).from_scipy_sparse(self)
return self.reduce(method, **kwargs)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
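        # Ufunc protocol entry point: gufuncs (ufuncs with a signature) are routed
        # through ``__array_function__``; ``outer`` calls are emulated by inserting
        # new axes so the operands broadcast against each other; element-wise calls
        # go through ``elemwise`` and reductions through ``SparseArray._reduce``.
        # When ``out=`` is given, the result is shallow-copied into that array.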
out = kwargs.pop("out", None)
if out is not None and not all(isinstance(x, type(self)) for x in out):
return NotImplemented
if getattr(ufunc, "signature", None) is not None:
return self.__array_function__(
ufunc, (np.ndarray, type(self)), inputs, kwargs
)
if out is not None:
kwargs["dtype"] = out[0].dtype
if method == "outer":
method = "__call__"
cum_ndim = 0
inputs_transformed = []
for inp in reversed(inputs):
inputs_transformed.append(inp[(Ellipsis,) + (None,) * cum_ndim])
cum_ndim += inp.ndim
inputs = tuple(reversed(inputs_transformed))
if method == "__call__":
result = elemwise(ufunc, *inputs, **kwargs)
elif method == "reduce":
result = SparseArray._reduce(ufunc, *inputs, **kwargs)
else:
return NotImplemented
if out is not None:
(out,) = out
if out.shape != result.shape:
raise ValueError(
"non-broadcastable output operand with shape %s "
"doesn't match the broadcast shape %s" % (out.shape, result.shape)
)
out._make_shallow_copy_of(result)
return out
return result
def reduce(self, method, axis=(0,), keepdims=False, **kwargs):
"""
Performs a reduction operation on this array.
Parameters
----------
method : numpy.ufunc
The method to use for performing the reduction.
axis : Union[int, Iterable[int]], optional
The axes along which to perform the reduction. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
kwargs : dict
Any extra arguments to pass to the reduction operation.
See Also
--------
numpy.ufunc.reduce : A similar Numpy method.
COO.reduce : This method implemented on COO arrays.
GCXS.reduce : This method implemented on GCXS arrays.
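        Examples
        --------
        A small sketch of an ``np.add`` reduction along one axis (array values
        chosen purely for illustration):
        >>> import numpy as np
        >>> from sparse import COO
        >>> s = COO.from_numpy(np.array([[1, 2, 0], [0, 3, 0]]))
        >>> s.reduce(np.add, axis=0).todense()  # doctest: +SKIP
        array([1, 5, 0])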
"""
axis = normalize_axis(axis, self.ndim)
zero_reduce_result = method.reduce([self.fill_value, self.fill_value], **kwargs)
reduce_super_ufunc = None
if not equivalent(zero_reduce_result, self.fill_value):
reduce_super_ufunc = _reduce_super_ufunc.get(method, None)
if reduce_super_ufunc is None:
raise ValueError(
"Performing this reduction operation would produce "
"a dense result: %s" % str(method)
)
if not isinstance(axis, tuple):
axis = (axis,)
out = self._reduce_calc(method, axis, keepdims, **kwargs)
if len(out) == 1:
return out[0]
data, counts, axis, n_cols, arr_attrs = out
result_fill_value = self.fill_value
if reduce_super_ufunc is None:
missing_counts = counts != n_cols
data[missing_counts] = method(
data[missing_counts], self.fill_value, **kwargs
)
else:
data = method(
data,
reduce_super_ufunc(self.fill_value, n_cols - counts),
).astype(data.dtype)
result_fill_value = reduce_super_ufunc(self.fill_value, n_cols)
out = self._reduce_return(data, arr_attrs, result_fill_value)
if keepdims:
shape = list(self.shape)
for ax in axis:
shape[ax] = 1
out = out.reshape(shape)
if out.ndim == 0:
return out[()]
return out
def _reduce_calc(self, method, axis, keepdims, **kwargs):
raise NotImplementedError
def _reduce_return(self, data, arr_attrs, result_fill_value):
raise NotImplementedError
def sum(self, axis=None, keepdims=False, dtype=None, out=None):
"""
Performs a sum operation along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to sum. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.sum` : Equivalent numpy function.
scipy.sparse.coo_matrix.sum : Equivalent Scipy function.
"""
return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)
def max(self, axis=None, keepdims=False, out=None):
"""
Maximize along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to maximize. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.max` : Equivalent numpy function.
scipy.sparse.coo_matrix.max : Equivalent Scipy function.
"""
return np.maximum.reduce(self, out=out, axis=axis, keepdims=keepdims)
amax = max
def any(self, axis=None, keepdims=False, out=None):
"""
        See if any values in the array are ``True``. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
            The axes along which to perform the reduction. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
        :obj:`numpy.any` : Equivalent numpy function.
"""
return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims)
def all(self, axis=None, keepdims=False, out=None):
"""
See if all values in an array are ``True``. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
            The axes along which to perform the reduction. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.all` : Equivalent numpy function.
"""
return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims)
def min(self, axis=None, keepdims=False, out=None):
"""
Minimize along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to minimize. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.min` : Equivalent numpy function.
scipy.sparse.coo_matrix.min : Equivalent Scipy function.
"""
return np.minimum.reduce(self, out=out, axis=axis, keepdims=keepdims)
amin = min
def prod(self, axis=None, keepdims=False, dtype=None, out=None):
"""
Performs a product operation along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to multiply. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
:obj:`numpy.prod` : Equivalent numpy function.
"""
return np.multiply.reduce(
self, out=out, axis=axis, keepdims=keepdims, dtype=dtype
)
def round(self, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
See also
--------
:obj:`numpy.round` : NumPy equivalent ufunc.
:obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two
arguments.
"""
if out is not None and not isinstance(out, tuple):
out = (out,)
return self.__array_ufunc__(
np.round, "__call__", self, decimals=decimals, out=out
)
round_ = round
def clip(self, min=None, max=None, out=None):
"""
Clip (limit) the values in the array.
Return an array whose values are limited to ``[min, max]``. One of min
or max must be given.
See Also
--------
sparse.clip : For full documentation and more details.
numpy.clip : Equivalent NumPy function.
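        Examples
        --------
        A minimal sketch (values are illustrative; note that the fill value of
        zero is clipped up to the minimum as well):
        >>> import numpy as np
        >>> import sparse
        >>> s = sparse.COO.from_numpy(np.array([0, 0, 5, 1, 3]))
        >>> s.clip(min=1, max=3).todense()  # doctest: +SKIP
        array([1, 1, 3, 1, 3])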
"""
if min is None and max is None:
raise ValueError("One of max or min must be given.")
if out is not None and not isinstance(out, tuple):
out = (out,)
return self.__array_ufunc__(
np.clip, "__call__", self, a_min=min, a_max=max, out=out
)
def astype(self, dtype, casting="unsafe", copy=True):
"""
Copy of the array, cast to a specified type.
See also
--------
scipy.sparse.coo_matrix.astype : SciPy sparse equivalent function
numpy.ndarray.astype : NumPy equivalent ufunc.
:obj:`COO.elemwise`: Apply an arbitrary element-wise function to one or two
arguments.
"""
# this matches numpy's behavior
if self.dtype == dtype and not copy:
return self
return self.__array_ufunc__(
np.ndarray.astype, "__call__", self, dtype=dtype, copy=copy, casting=casting
)
def mean(self, axis=None, keepdims=False, dtype=None, out=None):
"""
Compute the mean along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to compute the mean. Uses all axes by default.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
dtype: numpy.dtype
The data type of the output array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
numpy.ndarray.mean : Equivalent numpy method.
scipy.sparse.coo_matrix.mean : Equivalent Scipy method.
Notes
-----
* This function internally calls :obj:`COO.sum_duplicates` to bring the
array into canonical form.
* The :code:`out` parameter is provided just for compatibility with
Numpy and isn't actually supported.
Examples
--------
You can use :obj:`COO.mean` to compute the mean of an array across any
dimension.
>>> from sparse import COO
>>> x = np.array([[1, 2, 0, 0],
... [0, 1, 0, 0]], dtype='i8')
>>> s = COO.from_numpy(x)
>>> s2 = s.mean(axis=1)
>>> s2.todense() # doctest: +SKIP
        array([0.75, 0.25])
You can also use the :code:`keepdims` argument to keep the dimensions
after the mean.
>>> s3 = s.mean(axis=0, keepdims=True)
>>> s3.shape
(1, 4)
You can pass in an output datatype, if needed.
>>> s4 = s.mean(axis=0, dtype=np.float16)
>>> s4.dtype
dtype('float16')
By default, this reduces the array down to one number, computing the
mean along all axes.
>>> s.mean()
0.5
"""
if axis is None:
axis = tuple(range(self.ndim))
elif not isinstance(axis, tuple):
axis = (axis,)
den = reduce(operator.mul, (self.shape[i] for i in axis), 1)
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = inter_dtype = np.dtype("f8")
else:
dtype = self.dtype
inter_dtype = (
np.dtype("f4") if issubclass(dtype.type, np.float16) else dtype
)
else:
inter_dtype = dtype
num = self.sum(axis=axis, keepdims=keepdims, dtype=inter_dtype)
if num.ndim:
out = np.true_divide(num, den, casting="unsafe")
return out.astype(dtype) if out.dtype != dtype else out
return np.divide(num, den, dtype=dtype, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
        Compute the variance along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to compute the variance. Uses all axes by default.
dtype : numpy.dtype, optional
The output datatype.
out: SparseArray, optional
The array to write the output to.
ddof: int
The degrees of freedom.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
numpy.ndarray.var : Equivalent numpy method.
Notes
-----
* This function internally calls :obj:`COO.sum_duplicates` to bring the
array into canonical form.
Examples
--------
You can use :obj:`COO.var` to compute the variance of an array across any
dimension.
>>> from sparse import COO
>>> x = np.array([[1, 2, 0, 0],
... [0, 1, 0, 0]], dtype='i8')
>>> s = COO.from_numpy(x)
>>> s2 = s.var(axis=1)
>>> s2.todense() # doctest: +SKIP
array([0.6875, 0.1875])
You can also use the :code:`keepdims` argument to keep the dimensions
after the variance.
>>> s3 = s.var(axis=0, keepdims=True)
>>> s3.shape
(1, 4)
You can pass in an output datatype, if needed.
>>> s4 = s.var(axis=0, dtype=np.float16)
>>> s4.dtype
dtype('float16')
By default, this reduces the array down to one number, computing the
variance along all axes.
>>> s.var()
0.5
"""
axis = normalize_axis(axis, self.ndim)
if axis is None:
axis = tuple(range(self.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
rcount = reduce(operator.mul, (self.shape[a] for a in axis), 1)
# Make this warning show up on top.
if ddof >= rcount:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
arrmean = self.sum(axis, dtype=dtype, keepdims=True)
np.divide(arrmean, rcount, out=arrmean)
x = self - arrmean
if issubclass(self.dtype.type, np.complexfloating):
x = x.real * x.real + x.imag * x.imag
else:
x = np.multiply(x, x, out=x)
ret = x.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# Compute degrees of freedom and make sure it is not negative.
rcount = max([rcount - ddof, 0])
ret = ret[...]
np.divide(ret, rcount, out=ret, casting="unsafe")
return ret[()]
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the standard deviation along the given axes. Uses all axes by default.
Parameters
----------
axis : Union[int, Iterable[int]], optional
The axes along which to compute the standard deviation. Uses
all axes by default.
dtype : numpy.dtype, optional
The output datatype.
out: SparseArray, optional
The array to write the output to.
ddof: int
The degrees of freedom.
keepdims : bool, optional
Whether or not to keep the dimensions of the original array.
Returns
-------
SparseArray
The reduced output sparse array.
See Also
--------
numpy.ndarray.std : Equivalent numpy method.
Notes
-----
* This function internally calls :obj:`COO.sum_duplicates` to bring the
array into canonical form.
Examples
--------
You can use :obj:`COO.std` to compute the standard deviation of an array
across any dimension.
>>> from sparse import COO
>>> x = np.array([[1, 2, 0, 0],
... [0, 1, 0, 0]], dtype='i8')
>>> s = COO.from_numpy(x)
>>> s2 = s.std(axis=1)
>>> s2.todense() # doctest: +SKIP
array([0.8291562, 0.4330127])
You can also use the :code:`keepdims` argument to keep the dimensions
after the standard deviation.
>>> s3 = s.std(axis=0, keepdims=True)
>>> s3.shape
(1, 4)
You can pass in an output datatype, if needed.
>>> s4 = s.std(axis=0, dtype=np.float16)
>>> s4.dtype
dtype('float16')
By default, this reduces the array down to one number, computing the
standard deviation along all axes.
>>> s.std() # doctest: +SKIP
0.7071067811865476
"""
ret = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
ret = np.sqrt(ret)
return ret
@property
def real(self):
"""The real part of the array.
Examples
--------
>>> from sparse import COO
>>> x = COO.from_numpy([1 + 0j, 0 + 1j])
>>> x.real.todense() # doctest: +SKIP
array([1., 0.])
>>> x.real.dtype
dtype('float64')
Returns
-------
out : SparseArray
The real component of the array elements. If the array dtype is
real, the dtype of the array is used for the output. If the array
is complex, the output dtype is float.
See Also
--------
numpy.ndarray.real : NumPy equivalent attribute.
numpy.real : NumPy equivalent function.
"""
return self.__array_ufunc__(np.real, "__call__", self)
@property
def imag(self):
"""The imaginary part of the array.
Examples
--------
>>> from sparse import COO
>>> x = COO.from_numpy([1 + 0j, 0 + 1j])
>>> x.imag.todense() # doctest: +SKIP
array([0., 1.])
>>> x.imag.dtype
dtype('float64')
Returns
-------
out : SparseArray
The imaginary component of the array elements. If the array dtype
is real, the dtype of the array is used for the output. If the
array is complex, the output dtype is float.
See Also
--------
numpy.ndarray.imag : NumPy equivalent attribute.
numpy.imag : NumPy equivalent function.
"""
return self.__array_ufunc__(np.imag, "__call__", self)
def conj(self):
"""Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Examples
--------
>>> from sparse import COO
>>> x = COO.from_numpy([1 + 2j, 2 - 1j])
>>> res = x.conj()
>>> res.todense() # doctest: +SKIP
array([1.-2.j, 2.+1.j])
>>> res.dtype
dtype('complex128')
Returns
-------
out : SparseArray
The complex conjugate, with same dtype as the input.
See Also
--------
numpy.ndarray.conj : NumPy equivalent method.
numpy.conj : NumPy equivalent function.
"""
return np.conj(self)
|
py | 1a4892ac9870a1f63ee0954813b023c59d3afd19 | """Webroot plugin."""
import argparse
import collections
import json
import logging
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Set
from acme import challenges
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot._internal import cli
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge as AnnotatedChallenge
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
from certbot.plugins import util
from certbot.util import safe_open
logger = logging.getLogger(__name__)
_WEB_CONFIG_CONTENT = """\
<?xml version="1.0" encoding="UTF-8" ?>
<!--Generated by Certbot-->
<configuration>
<system.webServer>
<staticContent>
<remove fileExtension="."/>
<mimeMap fileExtension="." mimeType="text/plain" />
</staticContent>
</system.webServer>
</configuration>
"""
# This list references the hashes of all versions of the web.config files that Certbot could
# have generated during an HTTP-01 challenge. If you modify _WEB_CONFIG_CONTENT, you MUST add
# the new hash in this list.
_WEB_CONFIG_SHA256SUMS = [
"20c5ca1bd58fa8ad5f07a2f1be8b7cbb707c20fcb607a8fc8db9393952846a97",
"8d31383d3a079d2098a9d0c0921f4ab87e708b9868dc3f314d54094c2fe70336"
]
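# A small illustrative sketch (not executed by the plugin) of how a checksum for
# a modified template could be derived before appending it to the list above;
# whether it matches crypto_util.sha256sum exactly depends on how the file is
# written to and read back from disk:
#     import hashlib
#     hashlib.sha256(_WEB_CONFIG_CONTENT.encode("utf-8")).hexdigest()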
class Authenticator(common.Plugin, interfaces.Authenticator):
"""Webroot Authenticator."""
description = "Place files in webroot directory"
MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
def more_info(self): # pylint: disable=missing-function-docstring
return self.MORE_INFO.format(self.conf("path"))
@classmethod
def add_parser_arguments(cls, add):
add("path", "-w", default=[], action=_WebrootPathAction,
help="public_html / webroot path. This can be specified multiple "
"times to handle different domains; each domain will have "
"the webroot path that preceded it. For instance: `-w "
"/var/www/example -d example.com -d www.example.com -w "
"/var/www/thing -d thing.net -d m.thing.net` (default: Ask)")
add("map", default={}, action=_WebrootMapAction,
help="JSON dictionary mapping domains to webroot paths; this "
"implies -d for each entry. You may need to escape this from "
"your shell. E.g.: --webroot-map "
'\'{"eg1.is,m.eg1.is":"/www/eg1/", "eg2.is":"/www/eg2"}\' '
"This option is merged with, but takes precedence over, -w / "
"-d entries. At present, if you put webroot-map in a config "
"file, it needs to be on a single line, like: webroot-map = "
'{"example.com":"/var/www"}.')
def auth_hint(self, failed_achalls): # pragma: no cover
return ("The Certificate Authority failed to download the temporary challenge files "
"created by Certbot. Ensure that the listed domains serve their content from "
"the provided --webroot-path/-w and that files created there can be downloaded "
"from the internet.")
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=unused-argument,missing-function-docstring
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.full_roots: Dict[str, str] = {}
self.performed: DefaultDict[str, Set[AnnotatedChallenge]] = collections.defaultdict(set)
# stack of dirs successfully created by this authenticator
self._created_dirs: List[str] = []
def prepare(self): # pylint: disable=missing-function-docstring
pass
def perform(self, achalls): # pylint: disable=missing-function-docstring
self._set_webroots(achalls)
self._create_challenge_dirs()
return [self._perform_single(achall) for achall in achalls]
def _set_webroots(self, achalls):
if self.conf("path"):
webroot_path = self.conf("path")[-1]
logger.info("Using the webroot path %s for all unmatched domains.",
webroot_path)
for achall in achalls:
self.conf("map").setdefault(achall.domain, webroot_path)
else:
known_webroots = list(set(self.conf("map").values()))
for achall in achalls:
if achall.domain not in self.conf("map"):
new_webroot = self._prompt_for_webroot(achall.domain,
known_webroots)
# Put the most recently input
# webroot first for easy selection
try:
known_webroots.remove(new_webroot)
except ValueError:
pass
known_webroots.insert(0, new_webroot)
self.conf("map")[achall.domain] = new_webroot
def _prompt_for_webroot(self, domain, known_webroots):
webroot = None
while webroot is None:
if known_webroots:
# Only show the menu if we have options for it
webroot = self._prompt_with_webroot_list(domain, known_webroots)
if webroot is None:
webroot = self._prompt_for_new_webroot(domain)
else:
# Allow prompt to raise PluginError instead of looping forever
webroot = self._prompt_for_new_webroot(domain, True)
return webroot
def _prompt_with_webroot_list(self, domain, known_webroots):
path_flag = "--" + self.option_name("path")
while True:
code, index = display_util.menu(
"Select the webroot for {0}:".format(domain),
["Enter a new webroot"] + known_webroots,
cli_flag=path_flag, force_interactive=True)
if code == display_util.CANCEL:
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return None if index == 0 else known_webroots[index - 1] # code == display_util.OK
def _prompt_for_new_webroot(self, domain, allowraise=False):
code, webroot = ops.validated_directory(
_validate_webroot,
"Input the webroot for {0}:".format(domain),
force_interactive=True)
if code == display_util.CANCEL:
if not allowraise:
return None
raise errors.PluginError(
"Every requested domain must have a "
"webroot when using the webroot plugin.")
return _validate_webroot(webroot) # code == display_util.OK
def _create_challenge_dirs(self):
path_map = self.conf("map")
if not path_map:
raise errors.PluginError(
"Missing parts of webroot configuration; please set either "
"--webroot-path and --domains, or --webroot-map. Run with "
" --help webroot for examples.")
for name, path in path_map.items():
self.full_roots[name] = os.path.join(path, os.path.normcase(
challenges.HTTP01.URI_ROOT_PATH))
logger.debug("Creating root challenges validation dir at %s",
self.full_roots[name])
# Change the permissions to be writable (GH #1389)
# Umask is used instead of chmod to ensure the client can also
# run as non-root (GH #1795)
old_umask = filesystem.umask(0o022)
try:
# We ignore the last prefix in the next iteration,
# as it does not correspond to a folder path ('/' or 'C:')
for prefix in sorted(util.get_prefixes(self.full_roots[name])[:-1], key=len):
if os.path.isdir(prefix):
# Don't try to create directory if it already exists, as some filesystems
# won't reliably raise EEXIST or EISDIR if directory exists.
continue
try:
# Set owner as parent directory if possible, apply mode for Linux/Windows.
# For Linux, this is coupled with the "umask" call above because
# os.mkdir's "mode" parameter may not always work:
# https://docs.python.org/3/library/os.html#os.mkdir
filesystem.mkdir(prefix, 0o755)
self._created_dirs.append(prefix)
try:
filesystem.copy_ownership_and_apply_mode(
path, prefix, 0o755, copy_user=True, copy_group=True)
except (OSError, AttributeError) as exception:
logger.warning("Unable to change owner and uid of webroot directory")
logger.debug("Error was: %s", exception)
except OSError as exception:
raise errors.PluginError(
"Couldn't create root for {0} http-01 "
"challenge responses: {1}".format(name, exception))
finally:
filesystem.umask(old_umask)
            # On Windows, generate a local web.config file that allows IIS to serve
            # the challenge files despite the fact that they do not have a file extension.
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(self.full_roots[name], "web.config")
if os.path.exists(web_config_path):
logger.info("A web.config file has not been created in "
"%s because another one already exists.", self.full_roots[name])
continue
logger.info("Creating a web.config file in %s to allow IIS "
"to serve challenge files.", self.full_roots[name])
with safe_open(web_config_path, mode="w", chmod=0o644) as web_config:
web_config.write(_WEB_CONFIG_CONTENT)
def _get_validation_path(self, root_path, achall):
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
root_path = self.full_roots[achall.domain]
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = filesystem.umask(0o022)
try:
with safe_open(validation_path, mode="wb", chmod=0o644) as validation_file:
validation_file.write(validation.encode())
finally:
filesystem.umask(old_umask)
self.performed[root_path].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-function-docstring
for achall in achalls:
root_path = self.full_roots.get(achall.domain, None)
if root_path is not None:
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Removing %s", validation_path)
os.remove(validation_path)
self.performed[root_path].remove(achall)
if not filesystem.POSIX_MODE:
web_config_path = os.path.join(root_path, "web.config")
if os.path.exists(web_config_path):
sha256sum = crypto_util.sha256sum(web_config_path)
if sha256sum in _WEB_CONFIG_SHA256SUMS:
logger.info("Cleaning web.config file generated by Certbot in %s.",
root_path)
os.remove(web_config_path)
else:
logger.info("Not cleaning up the web.config file in %s "
"because it is not generated by Certbot.", root_path)
not_removed: List[str] = []
while self._created_dirs:
path = self._created_dirs.pop()
try:
os.rmdir(path)
except OSError as exc:
not_removed.insert(0, path)
logger.info("Challenge directory %s was not empty, didn't remove", path)
logger.debug("Error was: %s", exc)
self._created_dirs = not_removed
logger.debug("All challenges cleaned up")
class _WebrootMapAction(argparse.Action):
"""Action class for parsing webroot_map."""
def __call__(self, parser, namespace, webroot_map, option_string=None):
for domains, webroot_path in json.loads(webroot_map).items():
webroot_path = _validate_webroot(webroot_path)
namespace.webroot_map.update(
(d, webroot_path) for d in cli.add_domains(namespace, domains))
class _WebrootPathAction(argparse.Action):
"""Action class for parsing webroot_path."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._domain_before_webroot = False
def __call__(self, parser, namespace, webroot_path, option_string=None):
if self._domain_before_webroot:
raise errors.PluginError(
"If you specify multiple webroot paths, "
"one of them must precede all domain flags")
if namespace.webroot_path:
# Apply previous webroot to all matched
# domains before setting the new webroot path
prev_webroot = namespace.webroot_path[-1]
for domain in namespace.domains:
namespace.webroot_map.setdefault(domain, prev_webroot)
elif namespace.domains:
self._domain_before_webroot = True
namespace.webroot_path.append(_validate_webroot(webroot_path))
def _validate_webroot(webroot_path):
"""Validates and returns the absolute path of webroot_path.
:param str webroot_path: path to the webroot directory
:returns: absolute path of webroot_path
:rtype: str
"""
if not os.path.isdir(webroot_path):
raise errors.PluginError(webroot_path + " does not exist or is not a directory")
return os.path.abspath(webroot_path)
|
py | 1a48935e38818d22cc696fed37663cc02bd4fb0d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from neodroid.utilities.unity_specifications import Motion, Reaction, ReactionParameters
__author__ = "Christian Heider Nielsen"
import neodroid.wrappers.formal_wrapper as neo
def construct_reactions(env):
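    # Build one Reaction per environment instance: the first targets the base
    # prototyping environment and the remaining 19 target its clones, each driven
    # by a freshly sampled pair of actions on the actor's X/Z transform motions.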
parameters = ReactionParameters(
terminable=True,
step=True,
reset=False,
configure=False,
describe=False,
episode_count=True,
)
action1, action2 = env.action_space.sample()
motions = [
Motion("ActorActor", "ActorTransformX_", action1),
Motion("ActorActor", "ActorTransformZ_", action2),
]
reactions = [
Reaction(
environment_name=f"EnvironmentPrototypingEnvironment",
parameters=parameters,
motions=motions,
)
]
for i in range(19):
action1, action2 = env.action_space.sample()
motions = [
Motion("ActorActor", "ActorTransformX_", action1),
Motion("ActorActor", "ActorTransformZ_", action2),
]
reaction = Reaction(
environment_name=f"Environment(Clone){i}PrototypingEnvironment",
parameters=parameters,
motions=motions,
)
reactions.append(reaction)
return reactions
def main():
_environments = neo.NeodroidEnvironment(name="multienv", connect_to_running=True)
while _environments.is_connected:
reactions = construct_reactions(_environments)
states = _environments.react(reactions)
if __name__ == "__main__":
main()
|