{
"source": "2985578957/flex_extract",
"score": 2
}
#### File: Python/Classes/EcFlexpart.py
```python
from __future__ import print_function
import os
import sys
import glob
import shutil
from datetime import datetime, timedelta
# software specific classes and modules from flex_extract
#pylint: disable=wrong-import-position
sys.path.append('../')
import _config
from Classes.GribUtil import GribUtil
from Mods.tools import (init128, to_param_id, silent_remove, product,
my_error, get_informations, get_dimensions,
execute_subprocess, to_param_id_with_tablenumber,
generate_retrieval_period_boundary)
from Classes.MarsRetrieval import MarsRetrieval
from Classes.UioFiles import UioFiles
import Mods.disaggregation as disaggregation
#pylint: enable=wrong-import-position
# ------------------------------------------------------------------------------
# CLASS
# ------------------------------------------------------------------------------
class EcFlexpart(object):
'''
Class to represent FLEXPART specific ECMWF data.
FLEXPART needs grib files in a specific format. All necessary data fields
for one time step are stored in a single file. The class represents an
instance with all the parameters and settings necessary for retrieving
MARS data and modifying them so that they fit FLEXPART's needs. The class
is able to disaggregate the fluxes and convert grid types to the one needed
by FLEXPART, using the FORTRAN program for the conversion.
Attributes
----------
mreq_count : int
Counter for the number of generated mars requests.
inputdir : str
Path to the directory where the retrieved data is stored.
dataset : str
For public datasets a specific dataset name has to be used
to characterize the type of data.
basetime : int
The base time for a half-day retrieval. The 12 hours leading up to
this time are retrieved.
dtime : str
Time step in hours.
acctype : str
The field type for the accumulated forecast fields.
acctime : str
The starting time from the accumulated forecasts.
accmaxstep : str
The maximum forecast step for the accumulated forecast fields.
marsclass : str
Characterisation of dataset.
stream : str
Identifies the forecasting system used to generate the data.
number : str
Selects the member in ensemble forecast run.
resol : str
Specifies the desired triangular truncation of retrieved data,
before carrying out any other selected post-processing.
accuracy : str
Specifies the number of bits per value to be used in the
generated GRIB coded fields.
addpar : str
List of additional parameters to be retrieved.
level : str
Specifies the maximum level.
expver : str
The version of the dataset.
levelist : str
Specifies the required levels.
glevelist : str
Specifies the required levels for gaussian grids.
gaussian : str
This parameter is deprecated and should no longer be used.
Specifies the desired type of Gaussian grid for the output.
grid : str
Specifies the output grid which can be either a Gaussian grid
or a Latitude/Longitude grid.
area : str
Specifies the desired sub-area of data to be extracted.
purefc : int
Switch for definition of pure forecast mode or not.
outputfilelist : list of str
The final list of FLEXPART ready input files.
types : dictionary
Determines the combination of type of fields, time and forecast step
to be retrieved.
params : dictionary
Collection of grid types and their corresponding parameters,
levels, level types and the grid definition.
server : ECMWFService or ECMWFDataServer
This is the connection to the ECMWF data servers.
public : int
Decides which Web API Server version is used.
dates : str
Contains start and end date of the retrieval in the format
"YYYYMMDD/to/YYYYMMDD"
'''
# --------------------------------------------------------------------------
# CLASS FUNCTIONS
# --------------------------------------------------------------------------
def __init__(self, c, fluxes=False):
'''Creates an object/instance of EcFlexpart with the associated
settings of its attributes for the retrieval.
Parameters:
-----------
c : ControlFile
Contains all the parameters of CONTROL file and
command line.
fluxes : boolean, optional
Decides if the flux parameter settings are stored or
the rest of the parameter list.
Default value is False.
Return
------
'''
# set a counter for the number of generated mars requests
self.mreq_count = 0
self.inputdir = c.inputdir
self.dataset = c.dataset
self.basetime = c.basetime
self.dtime = c.dtime
self.acctype = c.acctype
self.acctime = c.acctime
self.accmaxstep = c.accmaxstep
self.marsclass = c.marsclass
self.stream = c.stream
self.number = c.number
self.resol = c.resol
self.accuracy = c.accuracy
self.addpar = c.addpar
self.level = c.level
self.expver = c.expver
self.levelist = c.levelist
self.glevelist = '1/to/' + c.level # in case of gaussian grid
self.gaussian = c.gaussian
self.grid = c.grid
self.area = c.area
self.purefc = c.purefc
self.outputfilelist = []
# Define the different types of field combinations (type, time, step)
self.types = {}
# Define the parameters and their level types, level list and
# grid resolution for the retrieval job
self.params = {}
if fluxes:
self._create_params_fluxes()
else:
self._create_params(c.gauss, c.eta, c.omega, c.cwc, c.wrf)
if fluxes:# and not c.purefc:
self._create_field_types_fluxes()
else:
self._create_field_types(c.type, c.time, c.step)
return
def _create_field_types(self, ftype, ftime, fstep):
'''Create the combination of field type, time and forecast step.
Parameters:
-----------
ftype : list of str
List of field types.
ftime : list of str
The time in hours of the field.
fstep : str
Specifies the forecast time step from forecast base time.
Valid values are hours (HH) from forecast base time.
Return
------
'''
i = 0
for ty, st, ti in zip(ftype, fstep, ftime):
btlist = list(range(len(ftime)))
if self.basetime == 12:
btlist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
if self.basetime == 0:
btlist = [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0]
# if ((ty.upper() == 'AN' and (int(c.time[i]) % int(c.dtime)) == 0) or
# (ty.upper() != 'AN' and (int(c.step[i]) % int(c.dtime)) == 0 and
# (int(c.step[i]) % int(c.dtime) == 0)) ) and \
# (int(c.time[i]) in btlist or c.purefc):
if (i in btlist) or self.purefc:
if ((ty.upper() == 'AN' and (int(ti) % int(self.dtime)) == 0) or
(ty.upper() != 'AN' and (int(st) % int(self.dtime)) == 0)):
if ty not in self.types.keys():
self.types[ty] = {'times': '', 'steps': ''}
if ti not in self.types[ty]['times']:
if self.types[ty]['times']:
self.types[ty]['times'] += '/'
self.types[ty]['times'] += ti
if st not in self.types[ty]['steps']:
if self.types[ty]['steps']:
self.types[ty]['steps'] += '/'
self.types[ty]['steps'] += st
i += 1
return
def _create_field_types_fluxes(self):
'''Create the combination of field type, time and forecast step
for the flux data.
Parameters:
-----------
Return
------
'''
if self.purefc:
# need to retrieve forecasts for step 000 in case of pure forecast
steps = '{}/to/{}/by/{}'.format(0, self.accmaxstep, self.dtime)
else:
steps = '{}/to/{}/by/{}'.format(self.dtime,
self.accmaxstep,
self.dtime)
self.types[str(self.acctype)] = {'times': str(self.acctime),
'steps': steps}
return
def _create_params(self, gauss, eta, omega, cwc, wrf):
'''Define the specific parameter settings for the retrieval.
The different parameters need specific grid types and level types
for the retrieval. We might get the following combinations of types
(depending on selection and availability):
These are shortcuts for the grib file names (leading sequence):
SH__ML, OG__ML, GG__ML, SH__SL, OG__SL, GG__SL, OG_OROLSM_SL
where:
SH = Spherical Harmonics, GG = Gaussian Grid, OG = Output Grid,
ML = Model Level, SL = Surface Level
For each of these combinations there is a list of parameter names,
the level type, the level list and the grid resolution.
There are different scenarios for data extraction from MARS:
1) Retrieval of etadot
eta=1, gauss=0, omega=0
2) Calculation of etadot from divergence
eta=0, gauss=1, omega=0
3) Calculation of etadot from omega (only makes sense for debugging)
eta=0, gauss=0, omega=1
4) Retrieval and Calculation of etadot (only for debugging)
eta=1, gauss=1, omega=0
5) Download also specific model and surface level data for FLEXPART-WRF
Parameters:
-----------
gauss : int
Gaussian grid is retrieved.
eta : int
Etadot parameter will be directly retrieved.
omega : int
The omega parameter will be retrieved.
cwc : int
The cloud liquid and ice water content will be retrieved.
wrf : int
Additional model level and surface level data will be retrieved for
WRF/FLEXPART-WRF simulations.
Return
------
'''
# SURFACE FIELDS
#-----------------------------------------------------------------------
self.params['SH__SL'] = ['LNSP', 'ML', '1', 'OFF']
self.params['OG__SL'] = ['SD/MSL/TCC/10U/10V/2T/2D/Z/LSM', \
'SFC', '1', self.grid]
if self.addpar:
self.params['OG__SL'][0] += self.addpar
if self.marsclass.upper() == 'EA' or self.marsclass.upper() == 'EP':
self.params['OG_OROLSM__SL'] = ["SDOR/CVL/CVH/FSR",
'SFC', '1', self.grid]
else:
self.params['OG_OROLSM__SL'] = ["SDOR/CVL/CVH/SR", \
'SFC', '1', self.grid]
# MODEL LEVEL FIELDS
#-----------------------------------------------------------------------
self.params['OG__ML'] = ['T/Q', 'ML', self.levelist, self.grid]
if not gauss and eta:
self.params['OG__ML'][0] += '/U/V/ETADOT'
elif gauss and not eta:
self.params['GG__SL'] = ['Q', 'ML', '1',
'{}'.format((int(self.resol) + 1) // 2)]
self.params['SH__ML'] = ['U/V/D', 'ML', self.glevelist, 'OFF']
elif not gauss and not eta:
self.params['OG__ML'][0] += '/U/V'
else: # GAUSS and ETA
print('Warning: Collecting etadot and parameters for gaussian grid '
'is a very costly parameter combination, '
'use this combination only for debugging!')
self.params['GG__SL'] = ['Q', 'ML', '1',
'{}'.format((int(self.resol) + 1) // 2)]
self.params['GG__ML'] = ['U/V/D/ETADOT', 'ML', self.glevelist,
'{}'.format((int(self.resol) + 1) // 2)]
if omega:
self.params['OG__ML'][0] += '/W'
if cwc:
self.params['OG__ML'][0] += '/CLWC/CIWC'
# ADDITIONAL FIELDS FOR FLEXPART-WRF MODEL (IF REQUESTED)
# ----------------------------------------------------------------------
if wrf:
# @WRF
# THIS IS NOT YET CORRECTLY IMPLEMENTED !!!
#
# UNDER CONSTRUCTION !!!
#
print('WRF VERSION IS UNDER CONSTRUCTION!') # dummy argument
#self.params['OG__ML'][0] += '/Z/VO'
#if '/D' not in self.params['OG__ML'][0]:
# self.params['OG__ML'][0] += '/D'
#wrf_sfc = ['SP','SKT','SST','CI','STL1','STL2', 'STL3','STL4',
# 'SWVL1','SWVL2','SWVL3','SWVL4']
#for par in wrf_sfc:
# if par not in self.params['OG__SL'][0]:
# self.params['OG__SL'][0] += '/' + par
return
def _create_params_fluxes(self):
'''Define the parameter setting for flux data.
Flux data are accumulated fields in time and are stored on the
surface level. The leading shortcut name for the grib files is:
"OG_acc_SL" with OG for Regular Output Grid, SL for Surface Level, and
acc for Accumulated Grid.
The params dictionary stores a list of parameter names, the level type,
the level list and the grid resolution.
The flux data are: LSP/CP/SSHF/EWSS/NSSS/SSR
Parameters:
-----------
Return
------
'''
self.params['OG_acc_SL'] = ["LSP/CP/SSHF/EWSS/NSSS/SSR",
'SFC', '1', self.grid]
return
def _mk_targetname(self, ftype, param, date):
'''Creates the filename for the requested grib data to be stored in.
This name is passed as the "target" parameter in the request.
Parameters
----------
ftype : str
Shortcut name of the type of the field. E.g. AN, FC, PF, ...
param : str
Shortcut of the grid type. E.g. SH__ML, SH__SL, GG__ML,
GG__SL, OG__ML, OG__SL, OG_OROLSM_SL, OG_acc_SL
date : str
The date period of the grib data to be stored in this file.
Return
------
targetname : str
The target filename for the grib data.
'''
targetname = (self.inputdir + '/' + ftype + param + '.' + date + '.' +
str(os.getppid()) + '.' + str(os.getpid()) + '.grb')
return targetname
def _start_retrievement(self, request, par_dict):
'''Creates the Mars Retrieval and prints or submits the request
depending on the status of the request variable.
Parameters
----------
request : int
Selects the mode of retrieval.
0: Retrieves the data from ECMWF.
1: Prints the mars requests to an output file.
2: Retrieves the data and prints the mars request.
par_dict : dictionary
Contains all parameters which have to be set for creating the
Mars Retrievals. The parameters are:
marsclass, dataset, stream, type, levtype, levelist, resol,
gaussian, accuracy, grid, target, area, date, time, number,
step, expver, param
Return
------
'''
# increase number of mars requests
self.mreq_count += 1
MR = MarsRetrieval(self.server,
self.public,
marsclass=par_dict['marsclass'],
dataset=par_dict['dataset'],
stream=par_dict['stream'],
type=par_dict['type'],
levtype=par_dict['levtype'],
levelist=par_dict['levelist'],
resol=par_dict['resol'],
gaussian=par_dict['gaussian'],
accuracy=par_dict['accuracy'],
grid=par_dict['grid'],
target=par_dict['target'],
area=par_dict['area'],
date=par_dict['date'],
time=par_dict['time'],
number=par_dict['number'],
step=par_dict['step'],
expver=par_dict['expver'],
param=par_dict['param'])
if request == 0:
MR.display_info()
MR.data_retrieve()
elif request == 1:
MR.print_infodata_csv(self.inputdir, self.mreq_count)
elif request == 2:
MR.print_infodata_csv(self.inputdir, self.mreq_count)
MR.display_info()
MR.data_retrieve()
else:
print('Failure')
return
def _mk_index_values(self, inputdir, inputfiles, keys):
'''Creates an index file for a set of grib parameter keys.
The values from the index keys are returned in a list.
Parameters
----------
keys : list of str
List of parameter names which serve as index keys.
inputfiles : UioFiles
Contains a list of files.
Return
------
iid : codes_index
This is a grib specific index structure to access
messages in a file.
index_vals : list of list of str
Contains the values from the keys used for a distinct selection
of grib messages in processing the grib files.
Content looks like e.g.:
index_vals[0]: ('20171106', '20171107', '20171108') ; date
index_vals[1]: ('0', '1200', '1800', '600') ; time
index_vals[2]: ('0', '12', '3', '6', '9') ; stepRange
'''
from eccodes import codes_index_get
iid = None
index_keys = keys
indexfile = os.path.join(inputdir, _config.FILE_GRIB_INDEX)
silent_remove(indexfile)
grib = GribUtil(inputfiles.files)
# creates new index file
iid = grib.index(index_keys=index_keys, index_file=indexfile)
# read the values of index keys
index_vals = []
for key in index_keys:
key_vals = codes_index_get(iid, key)
# have to sort the key values for correct order,
# therefore convert to int first
key_vals = [int(k) for k in key_vals]
key_vals.sort()
key_vals = [str(k) for k in key_vals]
index_vals.append(key_vals)
# index_vals looks for example like:
# index_vals[0]: ('20171106', '20171107', '20171108') ; date
# index_vals[1]: ('0', '1200') ; time
# index_vals[2]: (3', '6', '9', '12') ; stepRange
return iid, index_vals
def retrieve(self, server, dates, public, request, inputdir='.'):
'''Finalizes the retrieval information by setting final details
depending on the grid type.
Prepares MARS retrievals per grid type and submits them.
Parameters
----------
server : ECMWFService or ECMWFDataServer
The connection to the ECMWF server. This is different
for member-state users, who have full access, and non-
member-state users, who only have access to the public
datasets. The decision is made from the command line argument
"public"; for public access it is True (ECMWFDataServer),
for member-state users it is False (ECMWFService).
dates : str
Contains start and end date of the retrieval in the format
"YYYYMMDD/to/YYYYMMDD"
request : int
Selects the mode of retrieval.
0: Retrieves the data from ECMWF.
1: Prints the mars requests to an output file.
2: Retrieves the data and prints the mars request.
inputdir : str, optional
Path to the directory where the retrieved data is about
to be stored. The default is the current directory ('.').
Return
------
'''
self.dates = dates
self.server = server
self.public = public
self.inputdir = inputdir
oro = False
# define times with datetime module
t12h = timedelta(hours=12)
t24h = timedelta(hours=24)
# dictionary which contains all parameters for the mars request;
# entries with a "None" will change between requests and will
# therefore be set in each request separately
retr_param_dict = {'marsclass':self.marsclass,
'dataset':self.dataset,
'stream':None,
'type':None,
'levtype':None,
'levelist':None,
'resol':self.resol,
'gaussian':None,
'accuracy':self.accuracy,
'grid':None,
'target':None,
'area':None,
'date':None,
'time':None,
'number':self.number,
'step':None,
'expver':self.expver,
'param':None}
for ftype in sorted(self.types):
# ftype contains field types such as
# [AN, FC, PF, CV]
for pk, pv in sorted(self.params.items()):
# pk contains one of these keys of params
# [SH__ML, SH__SL, GG__ML, GG__SL, OG__ML, OG__SL,
# OG_OROLSM_SL, OG_acc_SL]
# pv contains all of the items of the belonging key
# [param, levtype, levelist, grid]
if isinstance(pv, str):
continue
retr_param_dict['type'] = ftype
retr_param_dict['time'] = self.types[ftype]['times']
retr_param_dict['step'] = self.types[ftype]['steps']
retr_param_dict['date'] = self.dates
retr_param_dict['stream'] = self.stream
retr_param_dict['target'] = \
self._mk_targetname(ftype,
pk,
retr_param_dict['date'].split('/')[0])
table128 = init128(_config.PATH_GRIBTABLE)
ids = to_param_id_with_tablenumber(pv[0], table128)
retr_param_dict['param'] = ids
retr_param_dict['levtype'] = pv[1]
retr_param_dict['levelist'] = pv[2]
retr_param_dict['grid'] = pv[3]
retr_param_dict['area'] = self.area
retr_param_dict['gaussian'] = self.gaussian
if pk == 'OG_OROLSM__SL' and not oro:
oro = True
# in CERA20C (class EP) there is no stream "OPER"!
if self.marsclass.upper() != 'EP':
retr_param_dict['stream'] = 'OPER'
retr_param_dict['type'] = 'AN'
retr_param_dict['time'] = '00'
retr_param_dict['step'] = '000'
retr_param_dict['date'] = self.dates.split('/')[0]
retr_param_dict['target'] = self._mk_targetname('',
pk,
retr_param_dict['date'])
elif pk == 'OG_OROLSM__SL' and oro:
continue
if pk == 'GG__SL' and pv[0] == 'Q':
retr_param_dict['area'] = ""
retr_param_dict['gaussian'] = 'reduced'
if ftype.upper() == 'FC' and \
'acc' not in retr_param_dict['target']:
if (int(retr_param_dict['time'][0]) +
int(retr_param_dict['step'][0])) > 23:
dates = retr_param_dict['date'].split('/')
sdate = datetime.strptime(dates[0], '%Y%m%d%H')
sdate = sdate - timedelta(days=1)
retr_param_dict['date'] = '/'.join(
[sdate.strftime("%Y%m%d")] +
retr_param_dict['date'][1:])
print('CHANGED FC start date to ' +
sdate.strftime("%Y%m%d") +
' to accommodate TIME=' +
retr_param_dict['time'][0] +
', STEP=' +
retr_param_dict['step'][0])
# ------ on demand path --------------------------------------------------
if self.basetime is None:
# ******* start retrievement
self._start_retrievement(request, retr_param_dict)
# ------ operational path ------------------------------------------------
else:
# check if mars job requests fields beyond basetime.
# if yes eliminate those fields since they may not
# be accessible with user's credentials
enddate = retr_param_dict['date'].split('/')[-1]
elimit = datetime.strptime(enddate + str(self.basetime),
'%Y%m%d%H')
if self.basetime == 12:
# -------------- flux data ----------------------------
if 'acc' in pk:
startdate = retr_param_dict['date'].split('/')[0]
enddate = datetime.strftime(elimit - t24h, '%Y%m%d')
retr_param_dict['date'] = '/'.join([startdate,
'to',
enddate])
# ******* start retrievement
self._start_retrievement(request, retr_param_dict)
retr_param_dict['date'] = \
datetime.strftime(elimit - t12h, '%Y%m%d')
retr_param_dict['time'] = '00'
retr_param_dict['target'] = \
self._mk_targetname(ftype, pk,
retr_param_dict['date'])
# ******* start retrievement
self._start_retrievement(request, retr_param_dict)
# -------------- non flux data ------------------------
else:
# ******* start retrievement
self._start_retrievement(request, retr_param_dict)
elif self.basetime == 0:
# retr_param_dict['date'] = \
# datetime.strftime(elimit - t24h, '%Y%m%d')
timesave = ''.join(retr_param_dict['time'])
if all(['/' in retr_param_dict['time'],
pk != 'OG_OROLSM__SL',
'acc' not in pk]):
times = retr_param_dict['time'].split('/')
steps = retr_param_dict['step'].split('/')
while int(times[0]) + int(steps[0]) <= 12:
times = times[1:]
if len(times) > 1:
retr_param_dict['time'] = '/'.join(times)
else:
retr_param_dict['time'] = times[0]
if all([pk != 'OG_OROLSM__SL',
int(retr_param_dict['step'].split('/')[0]) == 0,
int(timesave.split('/')[0]) == 0]):
retr_param_dict['date'] = \
datetime.strftime(elimit, '%Y%m%d')
retr_param_dict['time'] = '00'
retr_param_dict['step'] = '000'
retr_param_dict['target'] = \
self._mk_targetname(ftype, pk,
retr_param_dict['date'])
# ******* start retrievement
self._start_retrievement(request, retr_param_dict)
else:
raise ValueError('ERROR: Basetime has an invalid value '
'-> {}'.format(str(self.basetime)))
if request == 0 or request == 2:
print('MARS retrieve done ... ')
elif request == 1:
print('MARS request printed ...')
return
def write_namelist(self, c):
'''Creates a namelist file in the temporary directory and writes
the following values to it: maxl, maxb, mlevel,
mlevelist, mnauf, metapar, rlo0, rlo1, rla0, rla1,
momega, momegadiff, mgauss, msmooth, meta, metadiff, mdpdeta
Parameters
----------
c : ControlFile
Contains all the parameters of CONTROL file and
command line.
Return
------
'''
from genshi.template.text import NewTextTemplate
from genshi.template import TemplateLoader
from genshi.template.eval import UndefinedError
import numpy as np
try:
loader = TemplateLoader(_config.PATH_TEMPLATES, auto_reload=False)
namelist_template = loader.load(_config.TEMPFILE_NAMELIST,
cls=NewTextTemplate)
self.inputdir = c.inputdir
area = np.asarray(self.area.split('/')).astype(float)
grid = np.asarray(self.grid.split('/')).astype(float)
if area[1] > area[3]:
area[1] -= 360
maxl = int(round((area[3] - area[1]) / grid[1])) + 1
maxb = int(round((area[0] - area[2]) / grid[0])) + 1
stream = namelist_template.generate(
maxl=str(maxl),
maxb=str(maxb),
mlevel=str(self.level),
mlevelist=str(self.levelist),
mnauf=str(self.resol),
metapar='77',
rlo0=str(area[1]),
rlo1=str(area[3]),
rla0=str(area[2]),
rla1=str(area[0]),
momega=str(c.omega),
momegadiff=str(c.omegadiff),
mgauss=str(c.gauss),
msmooth=str(c.smooth),
meta=str(c.eta),
metadiff=str(c.etadiff),
mdpdeta=str(c.dpdeta)
)
except UndefinedError as e:
print('... ERROR ' + str(e))
sys.exit('\n... error occurred while trying to generate namelist ' +
_config.TEMPFILE_NAMELIST)
except OSError as e:
print('... ERROR CODE: ' + str(e.errno))
print('... ERROR MESSAGE:\n \t ' + str(e.strerror))
sys.exit('\n... error occurred while trying to generate template ' +
_config.TEMPFILE_NAMELIST)
try:
namelistfile = os.path.join(self.inputdir, _config.FILE_NAMELIST)
with open(namelistfile, 'w') as f:
f.write(stream.render('text'))
except OSError as e:
print('... ERROR CODE: ' + str(e.errno))
print('... ERROR MESSAGE:\n \t ' + str(e.strerror))
sys.exit('\n... error occurred while trying to write ' +
namelistfile)
return
def deacc_fluxes(self, inputfiles, c):
'''De-accumulates and disaggregates flux data.
Goes through all flux fields in temporal order and de-accumulates
the fields. Afterwards, the fields are disaggregated in time.
Different versions of disaggregation are provided for rainfall
data (darain, modified linear) and for the surface flux and
stress data (dapoly, cubic polynomial).
Parameters
----------
inputfiles : UioFiles
Contains the list of files that contain flux data.
c : ControlFile
Contains all the parameters of CONTROL file and
command line.
Return
------
'''
import numpy as np
from eccodes import (codes_index_select, codes_get,
codes_get_values, codes_set_values, codes_set,
codes_write, codes_release, codes_new_from_index,
codes_index_release)
table128 = init128(_config.PATH_GRIBTABLE)
# get ids from the flux parameter names
pars = to_param_id(self.params['OG_acc_SL'][0], table128)
iid = None
index_vals = None
# get the values of the keys which are used for distinct access
# of grib messages via product and save the maximum number of
# ensemble members if there is more than one
if '/' in self.number:
# more than one ensemble member is selected
index_keys = ["number", "date", "time", "step"]
# maximum ensemble number retrieved
# + 1 for the control run (ensemble number 0)
maxnum = int(self.number.split('/')[-1]) + 1
# remember the index of the number values
index_number = index_keys.index('number')
# empty set to save ensemble numbers which were already processed
ens_numbers = set()
# index for the ensemble number
inumb = 0
else:
index_keys = ["date", "time", "step"]
# maximum ensemble number
maxnum = None
# get sorted lists of the index values
# this is very important for disaggregating
# the flux data in correct order
iid, index_vals = self._mk_index_values(c.inputdir,
inputfiles,
index_keys)
# index_vals looks like e.g.:
# index_vals[0]: ('20171106', '20171107', '20171108') ; date
# index_vals[1]: ('0', '600', '1200', '1800') ; time
# index_vals[2]: ('0', '3', '6', '9', '12') ; stepRange
if c.rrint:
# set start and end timestamps for retrieval period
if not c.purefc:
start_date = datetime.strptime(c.start_date + '00', '%Y%m%d%H')
end_date = datetime.strptime(c.end_date + '23', '%Y%m%d%H')
else:
sdate_str = c.start_date + '{:0>2}'.format(index_vals[1][0])
start_date = datetime.strptime(sdate_str, '%Y%m%d%H')
edate_str = c.end_date + '{:0>2}'.format(index_vals[1][-1])
end_date = datetime.strptime(edate_str, '%Y%m%d%H')
end_date = end_date + timedelta(hours=c.maxstep)
# get necessary grid dimensions from grib files for storing the
# precipitation fields
info = get_informations(os.path.join(c.inputdir,
inputfiles.files[0]))
dims = get_dimensions(info, c.purefc, c.dtime, index_vals,
start_date, end_date)
# create empty numpy arrays
if not maxnum:
lsp_np = np.zeros((dims[1] * dims[0], dims[2]), dtype=np.float64)
cp_np = np.zeros((dims[1] * dims[0], dims[2]), dtype=np.float64)
else:
lsp_np = np.zeros((maxnum, dims[1] * dims[0], dims[2]), dtype=np.float64)
cp_np = np.zeros((maxnum, dims[1] * dims[0], dims[2]), dtype=np.float64)
# index counter for time line
it_lsp = 0
it_cp = 0
# store the order of date and step
date_list = []
step_list = []
# initialize dictionaries to store flux values per parameter
orig_vals = {}
deac_vals = {}
for p in pars:
orig_vals[p] = []
deac_vals[p] = []
# "product" genereates each possible combination between the
# values of the index keys
for prod in product(*index_vals):
# e.g. prod = ('20170505', '0', '12')
# ( date ,time, step)
print('CURRENT PRODUCT: ', prod)
# the whole process has to be done for each separate ensemble member
# therefore, for each new ensemble member we delete old flux values
# and start collecting flux data from the beginning time step
if maxnum and prod[index_number] not in ens_numbers:
ens_numbers.add(prod[index_number])
inumb = len(ens_numbers) - 1
# re-initialize dictionaries to store flux values per parameter
# for the next ensemble member
it_lsp = 0
it_cp = 0
orig_vals = {}
deac_vals = {}
for p in pars:
orig_vals[p] = []
deac_vals[p] = []
for i in range(len(index_keys)):
codes_index_select(iid, index_keys[i], prod[i])
# get first id from current product
gid = codes_new_from_index(iid)
# if there is no data for this specific time combination / product
# skip the rest of the for loop and start with next timestep/product
if not gid:
continue
# create correct timestamp from the three time components
cdate = str(codes_get(gid, 'date'))
time = codes_get(gid, 'time') // 100 # integer
step = codes_get(gid, 'step') # integer
ctime = '{:0>2}'.format(time)
t_date = datetime.strptime(cdate + ctime, '%Y%m%d%H')
t_dt = t_date + timedelta(hours=step)
t_m1dt = t_date + timedelta(hours=step-int(c.dtime))
t_m2dt = t_date + timedelta(hours=step-2*int(c.dtime))
if c.basetime is not None:
t_enddate = datetime.strptime(c.end_date + str(c.basetime),
'%Y%m%d%H')
else:
t_enddate = t_date + timedelta(2*int(c.dtime))
# if necessary, add ensemble member number to filename suffix
# otherwise, add empty string
if maxnum:
numbersuffix = '.N{:0>3}'.format(int(prod[index_number]))
else:
numbersuffix = ''
if c.purefc:
fnout = os.path.join(c.inputdir, 'flux' +
t_date.strftime('%Y%m%d.%H') +
'.{:0>3}'.format(step-2*int(c.dtime)) +
numbersuffix)
gnout = os.path.join(c.inputdir, 'flux' +
t_date.strftime('%Y%m%d.%H') +
'.{:0>3}'.format(step-int(c.dtime)) +
numbersuffix)
hnout = os.path.join(c.inputdir, 'flux' +
t_date.strftime('%Y%m%d.%H') +
'.{:0>3}'.format(step) +
numbersuffix)
else:
fnout = os.path.join(c.inputdir, 'flux' +
t_m2dt.strftime('%Y%m%d%H') + numbersuffix)
gnout = os.path.join(c.inputdir, 'flux' +
t_m1dt.strftime('%Y%m%d%H') + numbersuffix)
hnout = os.path.join(c.inputdir, 'flux' +
t_dt.strftime('%Y%m%d%H') + numbersuffix)
print("outputfile = " + fnout)
f_handle = open(fnout, 'wb')
h_handle = open(hnout, 'wb')
g_handle = open(gnout, 'wb')
# read message by message and store relevant data fields,
# whose parameter ids are stored in pars
while True:
if not gid:
break
parId = codes_get(gid, 'paramId') # integer
step = codes_get(gid, 'step') # integer
time = codes_get(gid, 'time') # integer
ni = codes_get(gid, 'Ni') # integer
nj = codes_get(gid, 'Nj') # integer
if parId not in orig_vals.keys():
# parameter is not a flux, find next one
continue
# define conversion factor
if parId == 142 or parId == 143:
fak = 1. / 1000.
else:
fak = 3600.
# get parameter values and reshape
values = codes_get_values(gid)
values = (np.reshape(values, (nj, ni))).flatten() / fak
# save the original and accumulated values
orig_vals[parId].append(values[:])
if c.marsclass.upper() == 'EA' or step <= int(c.dtime):
# no de-accumulation needed
deac_vals[parId].append(values[:] / int(c.dtime))
else:
# do de-accumulation
deac_vals[parId].append(
(orig_vals[parId][-1] - orig_vals[parId][-2]) /
int(c.dtime))
# store precipitation if new disaggregation method is selected
# only the exact days are needed
if c.rrint:
if start_date <= t_dt <= end_date:
if not c.purefc:
if t_dt not in date_list:
date_list.append(t_dt)
step_list = [0]
else:
if t_date not in date_list:
date_list.append(t_date)
if step not in step_list:
step_list.append(step)
# store precipitation values
if maxnum and parId == 142:
lsp_np[inumb, :, it_lsp] = deac_vals[parId][-1][:]
it_lsp += 1
elif not maxnum and parId == 142:
lsp_np[:, it_lsp] = deac_vals[parId][-1][:]
it_lsp += 1
elif maxnum and parId == 143:
cp_np[inumb, :, it_cp] = deac_vals[parId][-1][:]
it_cp += 1
elif not maxnum and parId == 143:
cp_np[:, it_cp] = deac_vals[parId][-1][:]
it_cp += 1
# information printout
print(parId, time, step, len(values), values[0], np.std(values))
# length of deac_vals[parId] corresponds to the
# number of time steps, max. 4 are needed for disaggregation
# with the old and original method
# run over all grib messages and perform
# shifting in time
if len(deac_vals[parId]) >= 3:
if len(deac_vals[parId]) > 3:
if not c.rrint and (parId == 142 or parId == 143):
values = disaggregation.darain(deac_vals[parId])
else:
values = disaggregation.dapoly(deac_vals[parId])
if not (step == c.maxstep and c.purefc \
or t_dt == t_enddate):
# remove first time step in list to shift
# time line
orig_vals[parId].pop(0)
deac_vals[parId].pop(0)
else:
# if the third time step is read (per parId),
# write out the first one as a boundary value
if c.purefc:
values = deac_vals[parId][1]
else:
values = deac_vals[parId][0]
if not (c.rrint and (parId == 142 or parId == 143)):
codes_set_values(gid, values)
if c.purefc:
codes_set(gid, 'stepRange', max(0, step-2*int(c.dtime)))
else:
codes_set(gid, 'stepRange', 0)
codes_set(gid, 'time', t_m2dt.hour*100)
codes_set(gid, 'date', int(t_m2dt.strftime('%Y%m%d')))
codes_write(gid, f_handle)
# squeeze out information of last two steps
# contained in deac_vals[parId]
# Note that deac_vals[parId][0] has not been popped
# in this case
if step == c.maxstep and c.purefc or \
t_dt == t_enddate:
# last step
if c.purefc:
values = deac_vals[parId][3]
codes_set_values(gid, values)
codes_set(gid, 'stepRange', step)
#truedatetime = t_m2dt + timedelta(hours=2*int(c.dtime))
codes_write(gid, h_handle)
else:
values = deac_vals[parId][3]
codes_set_values(gid, values)
codes_set(gid, 'stepRange', 0)
truedatetime = t_m2dt + timedelta(hours=2*int(c.dtime))
codes_set(gid, 'time', truedatetime.hour * 100)
codes_set(gid, 'date', int(truedatetime.strftime('%Y%m%d')))
codes_write(gid, h_handle)
if parId == 142 or parId == 143:
values = disaggregation.darain(list(reversed(deac_vals[parId])))
else:
values = disaggregation.dapoly(list(reversed(deac_vals[parId])))
# step before last step
if c.purefc:
codes_set(gid, 'stepRange', step-int(c.dtime))
#truedatetime = t_m2dt + timedelta(hours=int(c.dtime))
codes_set_values(gid, values)
codes_write(gid, g_handle)
else:
codes_set(gid, 'stepRange', 0)
truedatetime = t_m2dt + timedelta(hours=int(c.dtime))
codes_set(gid, 'time', truedatetime.hour * 100)
codes_set(gid, 'date', int(truedatetime.strftime('%Y%m%d')))
codes_set_values(gid, values)
codes_write(gid, g_handle)
codes_release(gid)
gid = codes_new_from_index(iid)
f_handle.close()
g_handle.close()
h_handle.close()
codes_index_release(iid)
if c.rrint:
self._create_rr_grib_dummy(inputfiles.files[0], c.inputdir)
self._prep_new_rrint(dims[0], dims[1], dims[2], lsp_np,
cp_np, maxnum, index_keys, index_vals, c)
return
def _prep_new_rrint(self, ni, nj, nt, lsp_np, cp_np, maxnum, index_keys, index_vals, c):
'''Calculates and writes out the disaggregated precipitation fields.
Disaggregation is done in time; the original times are written to the
flux files, while the additional subgrid times are written to
extra output files. These are named like the original files with
the suffixes "_1" and "_2" for the first and second subgrid point.
Parameters
----------
ni : int
Amount of zonal grid points.
nj : int
Amount of meridional grid points.
nt : int
Number of time steps.
lsp_np : numpy array of float
The large scale precipitation fields for each time step.
Shape (ni * nj, nt).
cp_np : numpy array of float
The convective precipitation fields for each time step.
Shape (ni * nj, nt).
maxnum : int
The maximum number of ensemble members. It is None
if there is no ensemble or only one member.
index_keys : list of str
List of parameter names which serve as index keys.
index_vals : list of list of str
Contains the values from the keys used for a distinct selection
of grib messages in processing the grib files.
Content looks like e.g.:
index_vals[0]: ('20171106', '20171107', '20171108') ; date
index_vals[1]: ('0', '1200', '1800', '600') ; time
index_vals[2]: ('0', '12', '3', '6', '9') ; stepRange
c : ControlFile
Contains all the parameters of CONTROL file and
command line.
Return
------
'''
import numpy as np
print('... disaggregation of precipitation with new method.')
tmpfile = os.path.join(c.inputdir, 'rr_grib_dummy.grb')
# initialize new numpy arrays for disaggregated fields
if maxnum:
lsp_new_np = np.zeros((maxnum, ni * nj, nt * 3), dtype=np.float64)
cp_new_np = np.zeros((maxnum, ni * nj, nt * 3), dtype=np.float64)
else:
lsp_new_np = np.zeros((1, ni * nj, nt * 3), dtype=np.float64)
cp_new_np = np.zeros((1, ni * nj, nt * 3), dtype=np.float64)
# do the disaggregation, but neglect the last value of the
# original time series. This one corresponds, for example, to
# 24 hours, which we don't need; we use 0 - 23 UTC for a day.
if maxnum:
for inum in range(maxnum):
for ix in range(ni*nj):
lsp_new_np[inum, ix, :] = disaggregation.IA3(lsp_np[inum, ix, :])[:-1]
cp_new_np[inum, ix, :] = disaggregation.IA3(cp_np[inum, ix, :])[:-1]
else:
for ix in range(ni*nj):
lsp_new_np[0, ix, :] = disaggregation.IA3(lsp_np[ix, :])[:-1]
cp_new_np[0, ix, :] = disaggregation.IA3(cp_np[ix, :])[:-1]
# write to grib files (full/orig times to flux file and inbetween
# times with step 1 and 2, respectively)
print('... write disaggregated precipitation to files.')
if maxnum:
# remember the index of the number values
index_number = index_keys.index('number')
# empty set to save unique ensemble numbers which were already processed
ens_numbers = set()
# index for the ensemble number
inumb = 0
else:
inumb = 0
# index variable of disaggregated fields
it = 0
# "product" genereates each possible combination between the
# values of the index keys
for prod in product(*index_vals):
# e.g. prod = ('20170505', '0', '12')
# ( date ,time, step)
# or prod = ('0' , '20170505', '0', '12')
# (number, date ,time, step)
cdate = prod[index_keys.index('date')]
ctime = '{:0>2}'.format(int(prod[index_keys.index('time')])//100)
cstep = '{:0>3}'.format(int(prod[index_keys.index('step')]))
date = datetime.strptime(cdate + ctime, '%Y%m%d%H')
date += timedelta(hours=int(cstep))
start_period, end_period = generate_retrieval_period_boundary(c)
# skip all temporary times
# which are outside the retrieval period
if date < start_period or \
date > end_period:
continue
# the whole process has to be done for each separate ensemble member
# therefore, for each new ensemble member we delete old flux values
# and start collecting flux data from the beginning time step
if maxnum and prod[index_number] not in ens_numbers:
ens_numbers.add(prod[index_number])
inumb = int(prod[index_number])
it = 0
# if necessary, add ensemble member number to filename suffix
# otherwise, add empty string
if maxnum:
numbersuffix = '.N{:0>3}'.format(int(prod[index_number]))
else:
numbersuffix = ''
# per original time stamp: write original time step and
# the two newly generated sub time steps
if c.purefc:
fluxfilename = 'flux' + date.strftime('%Y%m%d.%H') + '.' + cstep
else:
fluxfilename = 'flux' + date.strftime('%Y%m%d%H') + numbersuffix
# write original time step to flux file as usual
fluxfile = GribUtil(os.path.join(c.inputdir, fluxfilename))
fluxfile.set_keys(tmpfile, filemode='ab',
wherekeynames=['paramId'], wherekeyvalues=[142],
keynames=['perturbationNumber', 'date', 'time',
'stepRange', 'values'],
keyvalues=[inumb, int(date.strftime('%Y%m%d')),
date.hour*100, 0, lsp_new_np[inumb, :, it]]
)
fluxfile.set_keys(tmpfile, filemode='ab',
wherekeynames=['paramId'], wherekeyvalues=[143],
keynames=['perturbationNumber', 'date', 'time',
'stepRange', 'values'],
keyvalues=[inumb, int(date.strftime('%Y%m%d')),
date.hour*100, 0, cp_new_np[inumb, :, it]]
)
# rr for first subgrid point is identified by step = 1
fluxfile.set_keys(tmpfile, filemode='ab',
wherekeynames=['paramId'], wherekeyvalues=[142],
keynames=['perturbationNumber', 'date', 'time',
'stepRange', 'values'],
keyvalues=[inumb, int(date.strftime('%Y%m%d')),
date.hour*100, '1', lsp_new_np[inumb, :, it+1]]
)
fluxfile.set_keys(tmpfile, filemode='ab',
wherekeynames=['paramId'], wherekeyvalues=[143],
keynames=['perturbationNumber', 'date', 'time',
'stepRange', 'values'],
keyvalues=[inumb, int(date.strftime('%Y%m%d')),
date.hour*100, '1', cp_new_np[inumb, :, it+1]]
)
# rr for second subgrid point is identified by step = 2
fluxfile.set_keys(tmpfile, filemode='ab',
wherekeynames=['paramId'], wherekeyvalues=[142],
keynames=['perturbationNumber', 'date', 'time',
'stepRange', 'values'],
keyvalues=[inumb, int(date.strftime('%Y%m%d')),
date.hour*100, '2', lsp_new_np[inumb, :, it+2]]
)
fluxfile.set_keys(tmpfile, filemode='ab',
wherekeynames=['paramId'], wherekeyvalues=[143],
keynames=['perturbationNumber', 'date', 'time',
'stepRange', 'values'],
keyvalues=[inumb, int(date.strftime('%Y%m%d')),
date.hour*100, '2', cp_new_np[inumb, :, it+2]]
)
it = it + 3 # jump to next original time step in rr fields
return
def _create_rr_grib_dummy(self, ifile, inputdir):
'''Creates a grib file with one dummy message for each of the two
precipitation types lsp and cp.
Parameters
----------
ifile : str
Filename of the input file to read the grib messages from.
inputdir : str, optional
Path to the directory where the retrieved data is stored.
Return
------
'''
gribfile = GribUtil(os.path.join(inputdir, 'rr_grib_dummy.grb'))
gribfile.copy_dummy_msg(ifile, keynames=['paramId','paramId'],
keyvalues=[142,143], filemode='wb')
return
def create(self, inputfiles, c):
'''An index file will be created which depends on the combination
of "date", "time" and "stepRange" values. This is used to iterate
over all messages in each grib file passed in through the
parameter "inputfiles" and to separate specific parameters into fort.*
files. Afterwards the FORTRAN program is called to convert
all the data fields to the same grid and put them into one file
per unique time step (combination of "date", "time" and
"stepRange").
Note
----
This method is based on the ECMWF example index.py
https://software.ecmwf.int/wiki/display/GRIB/index.py
Parameters
----------
inputfiles : UioFiles
Contains a list of files.
c : ControlFile
Contains all the parameters of CONTROL file and
command line.
Return
------
'''
from eccodes import (codes_index_select, codes_get,
codes_get_values, codes_set_values, codes_set,
codes_write, codes_release, codes_new_from_index,
codes_index_release)
# generate start and end timestamp of the retrieval period
start_period = datetime.strptime(c.start_date + c.time[0], '%Y%m%d%H')
start_period = start_period + timedelta(hours=int(c.step[0]))
end_period = datetime.strptime(c.end_date + c.time[-1], '%Y%m%d%H')
end_period = end_period + timedelta(hours=int(c.step[-1]))
# @WRF
# THIS IS NOT YET CORRECTLY IMPLEMENTED !!!
#
# UNDER CONSTRUCTION !!!
#
#if c.wrf:
# table128 = init128(_config.PATH_GRIBTABLE)
# wrfpars = to_param_id('sp/mslp/skt/2t/10u/10v/2d/z/lsm/sst/ci/sd/\
# stl1/stl2/stl3/stl4/swvl1/swvl2/swvl3/swvl4',
# table128)
# these numbers are indices for the temporary files "fort.xx"
# into which the grib fields are separated,
# as input for the Fortran program
# 10: U,V | 11: T | 12: lnsp | 13: D | 16: sfc fields
# 17: Q | 18: Q, SL, GG| 19: omega | 21: etadot | 22: clwc+ciwc
fdict = {'10':None, '11':None, '12':None, '13':None, '16':None,
'17':None, '18':None, '19':None, '21':None, '22':None}
iid = None
index_vals = None
# get the values of the keys which are used for distinct access
# of grib messages via product
if '/' in self.number:
# more than one ensemble member is selected
index_keys = ["number", "date", "time", "step"]
else:
index_keys = ["date", "time", "step"]
iid, index_vals = self._mk_index_values(c.inputdir,
inputfiles,
index_keys)
# index_vals looks like e.g.:
# index_vals[0]: ('20171106', '20171107', '20171108') ; date
# index_vals[1]: ('0', '600', '1200', '1800') ; time
# index_vals[2]: ('0', '12', '3', '6', '9') ; stepRange
# "product" genereates each possible combination between the
# values of the index keys
for prod in product(*index_vals):
# e.g. prod = ('20170505', '0', '12')
# ( date ,time, step)
print('current product: ', prod)
for i in range(len(index_keys)):
codes_index_select(iid, index_keys[i], prod[i])
# get first id from current product
gid = codes_new_from_index(iid)
# if there is no data for this specific time combination / product
# skip the rest of the for loop and start with next timestep/product
if not gid:
continue
#============================================================================================
# remove old fort.* files and open new ones
# they are just valid for a single product
for k, f in fdict.items():
fortfile = os.path.join(c.inputdir, 'fort.' + k)
silent_remove(fortfile)
fdict[k] = open(fortfile, 'wb')
#============================================================================================
# create correct timestamp from the three time components
cdate = str(codes_get(gid, 'date'))
ctime = '{:0>2}'.format(codes_get(gid, 'time') // 100)
cstep = '{:0>3}'.format(codes_get(gid, 'step'))
timestamp = datetime.strptime(cdate + ctime, '%Y%m%d%H')
timestamp += timedelta(hours=int(cstep))
cdate_hour = datetime.strftime(timestamp, '%Y%m%d%H')
# if basetime is used, adapt start/end date period
if c.basetime is not None:
time_delta = timedelta(hours=12-int(c.dtime))
start_period = datetime.strptime(c.end_date + str(c.basetime),
'%Y%m%d%H') - time_delta
end_period = datetime.strptime(c.end_date + str(c.basetime),
'%Y%m%d%H')
# skip all temporary times
# which are outside the retrieval period
if timestamp < start_period or \
timestamp > end_period:
continue
# @WRF
# THIS IS NOT YET CORRECTLY IMPLEMENTED !!!
#
# UNDER CONSTRUCTION !!!
#
#if c.wrf:
# if 'olddate' not in locals() or cdate != olddate:
# fwrf = open(os.path.join(c.outputdir,
# 'WRF' + cdate + '.' + ctime + '.000.grb2'), 'wb')
# olddate = cdate[:]
#============================================================================================
# savedfields remembers which fields were already used.
savedfields = []
# sum of cloud liquid and ice water content
scwc = None
while 1:
if not gid:
break
paramId = codes_get(gid, 'paramId')
gridtype = codes_get(gid, 'gridType')
if paramId == 77: # ETADOT
codes_write(gid, fdict['21'])
elif paramId == 130: # T
codes_write(gid, fdict['11'])
elif paramId == 131 or paramId == 132: # U, V wind component
codes_write(gid, fdict['10'])
elif paramId == 133 and gridtype != 'reduced_gg': # Q
codes_write(gid, fdict['17'])
elif paramId == 133 and gridtype == 'reduced_gg': # Q, gaussian
codes_write(gid, fdict['18'])
elif paramId == 135: # W
codes_write(gid, fdict['19'])
elif paramId == 152: # LNSP
codes_write(gid, fdict['12'])
elif paramId == 155 and gridtype == 'sh': # D
codes_write(gid, fdict['13'])
elif paramId == 246 or paramId == 247: # CLWC, CIWC
# sum cloud liquid water and ice
if scwc is None:
scwc = codes_get_values(gid)
else:
scwc += codes_get_values(gid)
codes_set_values(gid, scwc)
codes_set(gid, 'paramId', 201031)
codes_write(gid, fdict['22'])
scwc = None
# @WRF
# THIS IS NOT YET CORRECTLY IMPLEMENTED !!!
#
# UNDER CONSTRUCTION !!!
#
#elif c.wrf and paramId in [129, 138, 155] and \
# levtype == 'hybrid': # Z, VO, D
# # do not do anything right now
# # these are specific parameter for WRF
# pass
else:
if paramId not in savedfields:
# SD/MSL/TCC/10U/10V/2T/2D/Z/LSM/SDOR/CVL/CVH/SR
# and all ADDPAR parameter
codes_write(gid, fdict['16'])
savedfields.append(paramId)
else:
print('duplicate ' + str(paramId) + ' not written')
# @WRF
# THIS IS NOT YET CORRECTLY IMPLEMENTED !!!
#
# UNDER CONSTRUCTION !!!
#
#try:
# if c.wrf:
# # model layer
# if levtype == 'hybrid' and \
# paramId in [129, 130, 131, 132, 133, 138, 155]:
# codes_write(gid, fwrf)
# # sfc layer
# elif paramId in wrfpars:
# codes_write(gid, fwrf)
#except AttributeError:
# pass
codes_release(gid)
gid = codes_new_from_index(iid)
#============================================================================================
for f in fdict.values():
f.close()
#============================================================================================
# call for Fortran program to convert e.g. reduced_gg grids to
# regular_ll and calculate detadot/dp
pwd = os.getcwd()
os.chdir(c.inputdir)
if os.stat('fort.21').st_size == 0 and c.eta:
print('Parameter 77 (etadot) is missing, most likely it is '
'not available for this type or date / time\n')
print('Check parameters CLASS, TYPE, STREAM, START_DATE\n')
my_error('fort.21 is empty while parameter eta '
'is set to 1 in CONTROL file')
# ============================================================================================
# write out all output to the log file before starting the fortran program
sys.stdout.flush()
# Fortran program creates file fort.15 (with u,v,etadot,t,sp,q)
execute_subprocess([os.path.join(c.exedir,
_config.FORTRAN_EXECUTABLE)],
error_msg='FORTRAN PROGRAM FAILED!')#shell=True)
os.chdir(pwd)
# ============================================================================================
# create name of final output file, e.g. EN13040500 (ENYYMMDDHH)
# for CERA-20C we need all 4 digits of the year since it spans 1900 - 2010
if c.purefc:
if c.marsclass == 'EP':
suffix = cdate[0:8] + '.' + ctime + '.' + cstep
else:
suffix = cdate[2:8] + '.' + ctime + '.' + cstep
else:
if c.marsclass == 'EP':
suffix = cdate_hour[0:10]
else:
suffix = cdate_hour[2:10]
# if necessary, add ensemble member number to filename suffix
if 'number' in index_keys:
index_number = index_keys.index('number')
if len(index_vals[index_number]) > 1:
suffix = suffix + '.N{:0>3}'.format(int(prod[index_number]))
fnout = os.path.join(c.inputdir, c.prefix + suffix)
print("outputfile = " + fnout)
# collect for final processing
self.outputfilelist.append(os.path.basename(fnout))
# # get additional precipitation subgrid data if available
# if c.rrint:
# self.outputfilelist.append(os.path.basename(fnout + '_1'))
# self.outputfilelist.append(os.path.basename(fnout + '_2'))
# ============================================================================================
# create outputfile and copy all data from intermediate files
# to the outputfile (final GRIB input files for FLEXPART)
orolsm = os.path.basename(glob.glob(c.inputdir +
'/OG_OROLSM__SL.*.' +
c.ppid +
'*')[0])
if c.marsclass == 'EP':
fluxfile = 'flux' + suffix
else:
fluxfile = 'flux' + cdate[0:2] + suffix
if not c.cwc:
flist = ['fort.15', fluxfile, 'fort.16', orolsm]
else:
flist = ['fort.15', 'fort.22', fluxfile, 'fort.16', orolsm]
with open(fnout, 'wb') as fout:
for f in flist:
shutil.copyfileobj(open(os.path.join(c.inputdir, f), 'rb'),
fout)
if c.omega:
with open(os.path.join(c.outputdir, 'OMEGA'), 'wb') as fout:
shutil.copyfileobj(open(os.path.join(c.inputdir, 'fort.25'),
'rb'), fout)
# ============================================================================================
# @WRF
# THIS IS NOT YET CORRECTLY IMPLEMENTED !!!
#
# UNDER CONSTRUCTION !!!
#
#if c.wrf:
# fwrf.close()
codes_index_release(iid)
return
def calc_extra_elda(self, path, prefix):
''' Calculates extra ensemble members for ELDA - Stream.
This is a specific feature which doubles the number of ensemble members
for the ELDA Stream.
Parameters
----------
path : str
Path to the output files.
prefix : str
The prefix of the output filenames as defined in Control file.
Return
------
'''
from eccodes import (codes_grib_new_from_file, codes_get_array,
codes_set_array, codes_release,
codes_set, codes_write)
# max number
maxnum = int(self.number.split('/')[-1])
# get a list of all prepared output files with control forecast (CF)
cf_filelist = UioFiles(path, prefix + '*.N000')
cf_filelist.files = sorted(cf_filelist.files)
for cffile in cf_filelist.files:
with open(cffile, 'rb') as f:
cfvalues = []
while True:
fid = codes_grib_new_from_file(f)
if fid is None:
break
cfvalues.append(codes_get_array(fid, 'values'))
codes_release(fid)
filename = cffile.split('N000')[0]
for i in range(1, maxnum + 1):
# read an ensemble member
g = open(filename + 'N{:0>3}'.format(i), 'rb')
# create file for newly calculated ensemble member
h = open(filename + 'N{:0>3}'.format(i+maxnum), 'wb')
# number of message in grib file
j = 0
while True:
gid = codes_grib_new_from_file(g)
if gid is None:
break
values = codes_get_array(gid, 'values')
# generate a new ensemble member by subtracting
# 2 * ( current time step value - last time step value )
codes_set_array(gid, 'values',
values-2*(values-cfvalues[j]))
codes_set(gid, 'number', i+maxnum)
codes_write(gid, h)
codes_release(gid)
j += 1
g.close()
h.close()
print('wrote ' + filename + 'N{:0>3}'.format(i+maxnum))
self.outputfilelist.append(
os.path.basename(filename + 'N{:0>3}'.format(i+maxnum)))
return
def process_output(self, c):
'''Postprocessing of FLEXPART input files.
The grib files are postprocessed depending on the selections in the
CONTROL file. The resulting files are moved to the output
directory if it is not equal to the input directory.
The following modifications might be done if
properly switched in CONTROL file:
GRIB2 - Conversion to GRIB2
ECTRANS - Transfer of files to gateway server
ECSTORAGE - Storage at ECMWF server
Parameters
----------
c : ControlFile
Contains all the parameters of CONTROL file and
command line.
Return
------
'''
print('\n\nPostprocessing:\n Format: {}\n'.format(c.format))
if _config.FLAG_ON_ECMWFSERVER:
print('ecstorage: {}\n ecfsdir: {}\n'.
format(c.ecstorage, c.ecfsdir))
print('ectrans: {}\n gateway: {}\n destination: {}\n '
.format(c.ectrans, c.gateway, c.destination))
print('Output filelist: ')
print(sorted(self.outputfilelist))
for ofile in self.outputfilelist:
ofile = os.path.join(self.inputdir, ofile)
if c.format.lower() == 'grib2':
execute_subprocess(['grib_set', '-s', 'edition=2,' +
'productDefinitionTemplateNumber=8',
ofile, ofile + '_2'],
error_msg='GRIB2 CONVERSION FAILED!')
execute_subprocess(['mv', ofile + '_2', ofile],
error_msg='RENAMING FOR NEW GRIB2 FORMAT '
'FILES FAILED!')
if c.ectrans and _config.FLAG_ON_ECMWFSERVER:
execute_subprocess(['ectrans', '-overwrite', '-gateway',
c.gateway, '-remote', c.destination,
'-source', ofile],
error_msg='TRANSFER TO LOCAL SERVER FAILED!')
if c.ecstorage and _config.FLAG_ON_ECMWFSERVER:
execute_subprocess(['ecp', '-o', ofile,
os.path.expandvars(c.ecfsdir)],
error_msg='COPY OF FILES TO ECSTORAGE '
'AREA FAILED!')
if c.outputdir != c.inputdir:
execute_subprocess(['mv', os.path.join(c.inputdir, ofile),
c.outputdir],
error_msg='RELOCATION OF OUTPUT FILES '
'TO OUTPUTDIR FAILED!')
return
```
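The de-accumulation step inside `deacc_fluxes` is easy to lose in the GRIB loop, so here is a minimal, self-contained sketch of the underlying idea, assuming hourly-accumulated flux fields and an illustrative `dtime`; the numbers are invented and no GRIB handling is involved.
```python
# Minimal sketch of the de-accumulation idea used in deacc_fluxes
# (illustrative values only; real fields come from GRIB messages).
import numpy as np

dtime = 3  # retrieval time step in hours (assumption for this sketch)
# flux accumulated since forecast start, at steps 3, 6, 9, 12
# (one value per "grid point" to keep the example small)
accumulated = [np.array([3.0]), np.array([9.0]),
               np.array([18.0]), np.array([30.0])]

deaccumulated = []
for i, values in enumerate(accumulated):
    if i == 0:
        # first step: no previous field, just average over the interval
        deaccumulated.append(values / dtime)
    else:
        # difference of two consecutive accumulated fields, per hour
        deaccumulated.append((accumulated[i] - accumulated[i - 1]) / dtime)

print([v[0] for v in deaccumulated])  # [1.0, 2.0, 3.0, 4.0]
```
The real method applies exactly this difference (`orig_vals[parId][-1] - orig_vals[parId][-2]`) divided by `c.dtime`, and then hands the resulting time series to `disaggregation.dapoly` or `disaggregation.darain`.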
#### File: Python/Classes/UioFiles.py
```python
import os
import sys
import fnmatch
# software specific modules from flex_extract
#pylint: disable=wrong-import-position
sys.path.append('../')
from Mods.tools import silent_remove, get_list_as_string
#pylint: enable=wrong-import-position
# ------------------------------------------------------------------------------
# CLASS
# ------------------------------------------------------------------------------
class UioFiles(object):
"""Collection of files matching a specific pattern.
The pattern can contain shell-style wildcards for the files.
The files are listed and can be transformed into a single string, or
they can be deleted.
Attributes
----------
path : str
Directory where to list the files.
pattern : str
Shell-style wildcard pattern, e.g. '*.grb'
files : list of str
List of files matching the pattern in the path.
"""
# --------------------------------------------------------------------------
# CLASS METHODS
# --------------------------------------------------------------------------
def __init__(self, path, pattern):
"""Assignes a specific pattern for these files.
Parameters
----------
path : str
Directory where to list the files.
pattern : str
Regular expression pattern. For example: '*.grb'
Return
------
"""
self.path = path
self.pattern = pattern
self.files = []
self._list_files(self.path)
return
def _list_files(self, path):
"""Lists all files in the directory with the matching
regular expression pattern.
Parameters
----------
path : str
Path to the files.
Return
------
"""
# Get the absolute path
path = os.path.abspath(path)
# get all files in the dir and subdirs as absolute paths
# pylint: disable=W0612
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, self.pattern):
self.files.append(os.path.join(root, filename))
return
def __str__(self):
"""Converts the list of files into a single string.
The entries are separated by a "," sign.
Parameters
----------
Return
------
files_string : str
The content of the list as a single string.
"""
filenames = [os.path.basename(f) for f in self.files]
files_string = get_list_as_string(filenames, concatenate_sign=', ')
return files_string
def delete_files(self):
"""Deletes the files.
Parameters
----------
Return
------
"""
for old_file in self.files:
silent_remove(old_file)
return
```
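For orientation, a minimal usage sketch of `UioFiles` follows; the directory and pattern are hypothetical, and the import assumes the `Python/Classes` package layout used by `EcFlexpart.py` above.
```python
# Hedged usage sketch for UioFiles; the path and pattern are hypothetical.
from Classes.UioFiles import UioFiles

flux_files = UioFiles('/path/to/workdir', 'flux*')   # shell wildcard, not a regex
print(flux_files)              # basenames joined with ', ' via __str__
for path in flux_files.files:  # absolute paths collected by _list_files
    print(path)
# flux_files.delete_files()    # would silently remove every matched file
```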
{
"source": "2985578957/torchpf",
"score": 3
}
#### File: 2985578957/torchpf/example.py
```python
from torchpf import show_stat
from torchpf import cal_Flops, cal_MAdd, cal_Memory, cal_params
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(56180, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 56180)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
if __name__ == '__main__':
model = Net()
input_size=(3, 224, 224)
show_stat(model, input_size)
print('Flops = ',cal_Flops(model, input_size))
print('MAdd = ',cal_MAdd(model, input_size))
print('Memory = ',cal_Memory(model, input_size))
print('Params = ',cal_params(model, input_size))
```
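As a further, hedged usage example, the same `torchpf` calls from `example.py` can be applied to a standard torchvision model; this sketch assumes `torchvision` is installed and reuses only the API already shown above.
```python
# Apply the torchpf statistics to a torchvision model
# (assumes torchvision is installed; API calls taken from example.py).
import torchvision.models as models
from torchpf import show_stat, cal_Flops, cal_params

model = models.resnet18()
input_size = (3, 224, 224)

show_stat(model, input_size)
print('Flops  =', cal_Flops(model, input_size))
print('Params =', cal_params(model, input_size))
```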
#### File: torchpf/torchpf/compute_flops.py
```python
import torch.nn as nn
import numpy as np
def compute_flops(module, inp, out, DEBUG=False):
if isinstance(module, nn.Conv2d):
return compute_Conv2d_flops(module, inp, out)
elif isinstance(module, nn.BatchNorm2d):
return compute_BatchNorm2d_flops(module, inp, out)
elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
return compute_Pool2d_flops(module, inp, out)
elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
return compute_ReLU_flops(module, inp, out)
elif isinstance(module, nn.Upsample):
return compute_Upsample_flops(module, inp, out)
elif isinstance(module, nn.Linear):
return compute_Linear_flops(module, inp, out)
else:
if DEBUG:
print(f"[Flops]: {type(module).__name__} is not supported!")
return 0
def compute_Conv2d_flops(module, inp, out):
# Can have multiple inputs, getting the first one
assert isinstance(module, nn.Conv2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
k_h, k_w = module.kernel_size
out_c, out_h, out_w = out.size()[1:]
groups = module.groups
filters_per_channel = out_c // groups
conv_per_position_flops = k_h * k_w * in_c * filters_per_channel
active_elements_count = batch_size * out_h * out_w
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return total_flops
def compute_BatchNorm2d_flops(module, inp, out):
assert isinstance(module, nn.BatchNorm2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
# in_c, in_h, in_w = inp.size()[1:]
batch_flops = np.prod(inp.shape)
if module.affine:
batch_flops *= 2
return batch_flops
def compute_ReLU_flops(module, inp, out):
assert isinstance(module, (nn.ReLU, nn.ReLU6,
nn.PReLU, nn.ELU, nn.LeakyReLU))
batch_size = inp.size()[0]
active_elements_count = batch_size
for s in inp.size()[1:]:
active_elements_count *= s
return active_elements_count
def compute_Pool2d_flops(module, inp, out):
assert isinstance(module, nn.MaxPool2d) or isinstance(module, nn.AvgPool2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
return np.prod(inp.shape)
def compute_Linear_flops(module, inp, out):
assert isinstance(module, nn.Linear)
assert len(inp.size()) == 2 and len(out.size()) == 2
batch_size = inp.size()[0]
return batch_size * inp.size()[1] * out.size()[1]
def compute_Upsample_flops(module, inp, out):
assert isinstance(module, nn.Upsample)
output_size = out[0]
batch_size = inp.size()[0]
output_elements_count = batch_size
for s in output_size.shape[1:]:
output_elements_count *= s
return output_elements_count
```
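As a quick sanity check of compute_Conv2d_flops above (per-position multiply-accumulates times output positions, plus one bias add per position), here is a small sketch; the import path is an assumption and the layer mirrors conv1 from example.py.
```python
# Sketch only; assumes compute_Conv2d_flops is importable from this module.
import torch
import torch.nn as nn
from torchpf.compute_flops import compute_Conv2d_flops  # assumed import path

conv = nn.Conv2d(3, 10, kernel_size=5)
inp = torch.randn(1, 3, 224, 224)
out = conv(inp)  # shape (1, 10, 220, 220)

flops = compute_Conv2d_flops(conv, inp, out)
# per output position: 5*5*3*10 = 750 multiply-accumulates
# output positions:    1*220*220 = 48400, plus one bias add per position
assert flops == 750 * 48400 + 10 * 48400
```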
#### File: torchpf/torchpf/statistics.py
```python
import torch.nn as nn
from . import ModelHook
from collections import OrderedDict
from . import StatTree, StatNode, report_format
from .compute_memory import num_params
def get_parent_node(root_node, stat_node_name):
assert isinstance(root_node, StatNode)
node = root_node
names = stat_node_name.split('.')
for i in range(len(names) - 1):
node_name = '.'.join(names[0:i+1])
child_index = node.find_child_index(node_name)
assert child_index != -1
node = node.children[child_index]
return node
def convert_leaf_modules_to_stat_tree(leaf_modules):
assert isinstance(leaf_modules, OrderedDict)
create_index = 1
root_node = StatNode(name='root', parent=None)
for leaf_module_name, leaf_module in leaf_modules.items():
names = leaf_module_name.split('.')
for i in range(len(names)):
create_index += 1
stat_node_name = '.'.join(names[0:i+1])
parent_node = get_parent_node(root_node, stat_node_name)
node = StatNode(name=stat_node_name, parent=parent_node)
parent_node.add_child(node)
if i == len(names) - 1: # leaf module itself
input_shape = leaf_module.input_shape.numpy().tolist()
output_shape = leaf_module.output_shape.numpy().tolist()
node.input_shape = input_shape
node.output_shape = output_shape
node.parameter_quantity = leaf_module.parameter_quantity.numpy()[
0]
node.inference_memory = leaf_module.inference_memory.numpy()[0]
node.MAdd = leaf_module.MAdd.numpy()[0]
node.Flops = leaf_module.Flops.numpy()[0]
node.duration = leaf_module.duration.numpy()[0]
node.Memory = leaf_module.Memory.numpy().tolist()
return StatTree(root_node)
class ModelStat(object):
def __init__(self, model, input_size, query_granularity=1, DEBUG=False):
assert isinstance(model, nn.Module)
assert isinstance(input_size, (tuple, list)) and len(input_size) == 3
self._model = model
self._input_size = input_size
self._query_granularity = query_granularity
self.DEBUG = DEBUG
def _analyze_model(self):
model_hook = ModelHook(self._model, self._input_size, self.DEBUG)
leaf_modules = model_hook.retrieve_leaf_modules()
stat_tree = convert_leaf_modules_to_stat_tree(leaf_modules)
collected_nodes = stat_tree.get_collected_stat_nodes(
self._query_granularity)
return collected_nodes
def show_report(self):
collected_nodes = self._analyze_model()
report = report_format(collected_nodes)
print(report)
def get_stat(model, input_size, query_granularity=1, DEBUG=False):
return ModelStat(model, input_size, query_granularity, DEBUG=DEBUG)._analyze_model()
def show_stat(model, input_size, query_granularity=1, DEBUG=False):
ms = ModelStat(model, input_size, query_granularity, DEBUG=DEBUG)
ms.show_report()
def cal_Flops(model, input_size, clever_format=True, query_granularity=1, DEBUG=False):
ms = ModelStat(model, input_size, query_granularity, DEBUG=DEBUG)
analyze_data = ms._analyze_model()
Flops = 0
for i in range(len(analyze_data)):
Flops += analyze_data[i].Flops
if clever_format:
if Flops > 1E9:
return f'{Flops/1E9:.2f}G'
elif Flops > 1E6:
return f'{Flops/1E6:.2f}M'
elif Flops > 1E3:
return f'{Flops/1E3:.2f}K'
else:
return Flops
else:
return Flops
def cal_MAdd(model, input_size, clever_format=True, query_granularity=1, DEBUG=False):
ms = ModelStat(model, input_size, query_granularity, DEBUG=DEBUG)
analyze_data = ms._analyze_model()
MAdd = 0
for i in range(len(analyze_data)):
MAdd += analyze_data[i].MAdd
if clever_format:
if MAdd > 1E9:
return f'{MAdd/1E9:.2f}G'
elif MAdd > 1E6:
return f'{MAdd/1E6:.2f}M'
elif MAdd > 1E3:
return f'{MAdd/1E3:.2f}K'
else:
return MAdd
else:
return MAdd
def cal_Memory(model, input_size, clever_format=True, query_granularity=1, DEBUG=False):
ms = ModelStat(model, input_size, query_granularity, DEBUG=DEBUG)
analyze_data = ms._analyze_model()
Memory = [0, 0]
for i in range(len(analyze_data)):
Memory[0] += analyze_data[i].Memory[0]
Memory[1] += analyze_data[i].Memory[1]
Memory = sum(Memory)
if clever_format:
if Memory > 1024**3:
return f'{Memory/1024**3:.2f}G'
elif Memory > 1024**2:
return f'{Memory/1024**2:.2f}M'
elif Memory > 1024:
return f'{Memory/1024:.2f}K'
else:
return Memory
else:
return Memory
def cal_params(model, clever_format=True):
params = num_params(model)
if clever_format:
if params > 1E9:
return f'{params/1E9:.2f}G'
elif params > 1E6:
return f'{params/1E6:.2f}M'
elif params > 1E3:
return f'{params/1E3:.2f}K'
else:
return params
else:
return params
``` |
{
"source": "299hannah/password-locker",
"score": 3
} |
#### File: 299hannah/password-locker/user.py
```python
import random
import string
import pyperclip
class User:
"""class"""
userList = []
def __init__(self, username, password):
" magic constructor method "
self.username = username
self.password = password
self.isLoggedin = False
def CreateUser(username, password):
"""method"""
newUser = User(username, password)
return newUser
def login(self):
print("logged in successfully")
def saveUser(self, username, password):
"method"
User.userList.append(self)
@classmethod
def displayUser(cls):
return cls.userList
def deleteUser(self):
User.userList.remove(self)
class Credentials:
credentials_list = []
@classmethod
def verify_user(cls, username, password):
aUser = ""
for user in User.userList:
if (user.username == username and user.password == password):
                aUser = user.username
        return aUser
def __init__(self, account, username, password):
"""
        credentials to be stored
"""
self.account = account
        self.username = username
self.password = password
def save_details(self):
Credentials.credentials_list.append(self)
def delete_credentials(self):
Credentials.credentials_list.remove(self)
    @classmethod
    def createCredential(cls, account, username, password):
        "creates a new credential"
        newCredential = cls(account, username, password)
        return newCredential
    @classmethod
    def save_credentials(cls, account, username, password):
        "creates a credential and saves it in the list"
        newCredential = cls(account, username, password)
        newCredential.save_details()
        return newCredential
    @classmethod
    def find_credential(cls, account):
"method that takes class name and returns the account name credential"
for credential in cls.credentials_list:
if credential.account == account:
return credential
print("There is no such account dear")
@classmethod
def copy_password(cls, account):
        found_credentials = Credentials.find_credential(account)
pyperclip.copy(found_credentials.password)
@classmethod
def credentialExist(cls, account):
"checks if the credential exists from the list"
for credential in cls.credentials_list:
if credential.account == account:
return True
return False
@classmethod
def display_credentials(cls):
"returns all credentials in the list"
return cls.credentials_list
def generatePassword(stringLength=8):
"generates a random password "
password = string.ascii_uppercase + string.ascii_lowercase + string.digits + "!@#"
return ''.join(random.choice(password) for i in range(stringLength))
def copypassword(parameter_list):
"""
method that allows copying of password to keyboard
"""
pass
def main():
isTrue = True
print(
"Welcome to password Locker.Here you manage your passwords and even generate new passwords."
)
while isTrue == True:
# print(
# "Hi , your account has logged in successfully!"
# )
print(
"Please enter one to proceed:\n\n 1. ca for Create new Account\n 2. lg for login\n 3. ex for Exit"
)
shortCode = input("").lower().strip()
if shortCode == "ca":
print("Sign Up Account")
print("*" * 20)
print("Username:")
username = input()
while True:
print(
"1. Type TP s to type your own password:\n or \n 2. GP for generating random password"
)
passwordOption = input().lower().strip()
if passwordOption == 'tp':
print("Enter Your Password")
password = input("<PASSWORD>")
break
elif passwordOption == 'gp':
password = Credentials.generatePassword()
break
else:
print("invalid pasword")
            new_user = User.CreateUser(username, password)
            new_user.saveUser(username, password)
print("\n")
print(
f"Hi {username}, your account has been created successfully! \n Your password is: {password}"
)
elif shortCode == 'lg':
print("*" * 50)
print("Enter your username and password")
print("*" * 50)
print("Username")
username = input()
print("password")
password = input()
for user in User.userList:
if username == user.username:
if user.password == password:
print(user.login())
else:
                    new_user = User.CreateUser(username, password)
                    new_user.saveUser(username, password)
print("\n")
print(
f"Hi {username}, your account has logged in successfully! \n Your password is: {password}"
)
else:
print("Create Account")
break
# elif shortCode == 'ex':
# print("See you later!!")
# break
# else:
# print("invalid! check your entry again \n")
while True:
print(
"what do you want to do?\n 1. cc for create new credentials \n 2. ds for Display existing Credentials\n 3. fc for find a credential \n 4. dc for Delete an existing credential \n 5. ex-Exit application"
)
shortCode = input().lower().strip()
if shortCode == 'cc':
print("New Credential account")
print("\n")
print("Account Name example Instagram")
account = input().lower()
print("Account username: ")
username = input()
print("password")
password=input()
Credentials.save_credentials(account,username,password)
print('/n')
print(
f"Account credential for: {account} - username: {username} - password:{password} created successfully"
)
print("/n")
# while True:
# print(
# "1. TP- To type your password if already have an account:\n 2.GP-To generate random password"
# )
# passwordOption = input().lower()
# if passwordOption == 'TP':
# print("Account's Password :")
# password = input().lower()
# elif passwordOption == 'GP':
# password = Credentials.generatePassword()
# break
# else:
# print("invalid password please try again")
# Credentials.createCredential(account, username, password)
# Credentials.save_credentials(username,password)
# print('/n')
# print(
# f"Account credential for: {account} - username: {username} - password:{password} created successfully"
# )
# print("/n")
elif shortCode == "ds":
# if Credentials.display_credentials():
print("Your credentials include: \n")
for credential in Credentials.credentials_list:
account = account
username = username
password = password
print(
f"Account name: {account}\n Account username: {username}\n Account password: {password}\n"
)
else:
print("You have no saved credentials\n")
elif shortCode == "fc":
print("Enter the Account Name you want to search for")
account = input().lower().strip()
if Credentials.credentialExist(account):
                searchAccount = Credentials.find_credential(account)
                print(
                    f"Account name: {searchAccount.account} username: {searchAccount.username} password: {searchAccount.password}"
                )
else:
print("credential does not exist\n")
elif shortCode == 'dc':
print("Account name you would like to delete?")
account= input().lower().strip()
if Credentials.credentialExist(account):
                delete_credential = Credentials.find_credential(account)
                delete_credential.delete_credentials()
print("Account Successfully deleted")
else:
print("No such an account name")
elif shortCode == 'ex':
print("See you later!")
            isTrue = False
            break
else:
print("invalid")
main()
``` |
{
"source": "29ayush/simple_dqn",
"score": 3
} |
#### File: simple_dqn/src/agent.py
```python
import random
import logging
import numpy as np
logger = logging.getLogger(__name__)
from state_buffer import StateBuffer
class Agent:
def __init__(self, environment, replay_memory, deep_q_network, args):
self.env = environment
self.mem = replay_memory
self.net = deep_q_network
self.buf = StateBuffer(args)
self.num_actions = self.env.numActions()
self.random_starts = args.random_starts
self.history_length = args.history_length
self.exploration_rate_start = args.exploration_rate_start
self.exploration_rate_end = args.exploration_rate_end
self.exploration_decay_steps = args.exploration_decay_steps
self.exploration_rate_test = args.exploration_rate_test
self.total_train_steps = args.start_epoch * args.train_steps
self.train_frequency = args.train_frequency
self.train_repeat = args.train_repeat
self.target_steps = args.target_steps
self.callback = None
def _restartRandom(self):
self.env.restart()
# perform random number of dummy actions to produce more stochastic games
for i in xrange(random.randint(self.history_length, self.random_starts) + 1):
reward = self.env.act(0)
terminal = self.env.isTerminal()
if terminal:
self.env.restart()
screen = self.env.getScreen()
# add dummy states to buffer
self.buf.add(screen)
def _explorationRate(self):
# calculate decaying exploration rate
if self.total_train_steps < self.exploration_decay_steps:
return self.exploration_rate_start - self.total_train_steps * (self.exploration_rate_start - self.exploration_rate_end) / self.exploration_decay_steps
else:
return self.exploration_rate_end
def step(self, exploration_rate):
# exploration rate determines the probability of random moves
if random.random() < exploration_rate:
action = random.randrange(self.num_actions)
logger.debug("Random action = %d" % action)
else:
# otherwise choose action with highest Q-value
state = self.buf.getStateMinibatch()
# for convenience getStateMinibatch() returns minibatch
# where first item is the current state
qvalues = self.net.predict(state)
assert len(qvalues[0]) == self.num_actions
# choose highest Q-value of first state
action = np.argmax(qvalues[0])
logger.debug("Predicted action = %d" % action)
# perform the action
reward = self.env.act(action)
screen = self.env.getScreen()
terminal = self.env.isTerminal()
# print reward
        if reward != 0:
logger.debug("Reward: %d" % reward)
# add screen to buffer
self.buf.add(screen)
# restart the game if over
if terminal:
logger.debug("Terminal state, restarting")
self._restartRandom()
# call callback to record statistics
if self.callback:
self.callback.on_step(action, reward, terminal, screen, exploration_rate)
return action, reward, screen, terminal
def play_random(self, random_steps):
#call env.restart first so that env.reset is called before step.
self.env.restart()
# play given number of steps
for i in xrange(random_steps):
# use exploration rate 1 = completely random
action, reward, screen, terminal = self.step(1)
self.mem.add(action, reward, screen, terminal)
def train(self, train_steps, epoch = 0):
# do not do restart here, continue from testing
#self._restartRandom()
# play given number of steps
for i in xrange(train_steps):
# perform game step
action, reward, screen, terminal = self.step(self._explorationRate())
self.mem.add(action, reward, screen, terminal)
# Update target network every target_steps steps
if self.target_steps and i % self.target_steps == 0:
self.net.update_target_network()
# train after every train_frequency steps
if self.mem.count > self.mem.batch_size and i % self.train_frequency == 0:
# train for train_repeat times
for j in xrange(self.train_repeat):
# sample minibatch
minibatch = self.mem.getMinibatch()
# train the network
self.net.train(minibatch, epoch)
# increase number of training steps for epsilon decay
self.total_train_steps += 1
def test(self, test_steps, epoch = 0):
# just make sure there is history_length screens to form a state
self._restartRandom()
# play given number of steps
for i in xrange(test_steps):
# perform game step
self.step(self.exploration_rate_test)
def play(self, num_games):
# just make sure there is history_length screens to form a state
self._restartRandom()
for i in xrange(num_games):
# play until terminal state
terminal = False
while not terminal:
action, reward, screen, terminal = self.step(self.exploration_rate_test)
# add experiences to replay memory for visualization
self.mem.add(action, reward, screen, terminal)
```
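_explorationRate() above is a plain linear decay from exploration_rate_start to exploration_rate_end over exploration_decay_steps training steps; a standalone sketch with made-up hyperparameters:
```python
# Illustration only; the hyperparameter values are invented.
start, end, decay_steps = 1.0, 0.1, 1000

def exploration_rate(total_train_steps):
    if total_train_steps < decay_steps:
        return start - total_train_steps * (start - end) / decay_steps
    return end

print(exploration_rate(0))     # 1.0  -> purely random actions at the start
print(exploration_rate(500))   # 0.55 -> halfway through the decay
print(exploration_rate(2000))  # 0.1  -> clamped at the final rate
```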
#### File: src/nvis/data.py
```python
import h5py
import numpy as np
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
Arguments:
        img_data (ndarray, shape: [N, M, 3], dtype: uint8): image data
        downsample (int): downsampling factor applied for render performance
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32
#bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
#final_image = bokeh_img.reshape(img_h, img_w * (C+1)).view(np.uint32)
# put last 3 frames into separate color channels and add alpha channel
bokeh_img = np.dstack([img_data[:,:,1], img_data[:,:,2], img_data[:,:,3], 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * 4).view(np.uint32)
return final_image
def h5_deconv_data(f):
"""
Read deconv visualization data from hdf5 file.
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
if 'deconv' not in f.keys():
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in act_data.keys():
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
```
#### File: simple_dqn/src/state_buffer.py
```python
import numpy as np
class StateBuffer:
"""
While ReplayMemory could have been used for fetching the current state,
this also means that test time states make their way to training process.
Having separate StateBuffer ensures that test data doesn't leak into training.
"""
def __init__(self, args):
self.history_length = args.history_length
self.dims = (args.screen_height, args.screen_width)
self.batch_size = args.batch_size
self.buffer = np.zeros((self.batch_size, self.history_length) + self.dims, dtype=np.uint8)
def add(self, observation):
assert observation.shape == self.dims
self.buffer[0, :-1] = self.buffer[0, 1:]
self.buffer[0, -1] = observation
def getState(self):
return self.buffer[0]
def getStateMinibatch(self):
return self.buffer
def reset(self):
self.buffer *= 0
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--screen_width", type=int, default=40, help="Screen width after resize.")
parser.add_argument("--screen_height", type=int, default=52, help="Screen height after resize.")
parser.add_argument("--history_length", type=int, default=4, help="How many screen frames form a state.")
parser.add_argument("--batch_size", type=int, default=32, help="Batch size for neural network.")
parser.add_argument("--loops", type=int, default=1000000, help="Number of loops in testing.")
args = parser.parse_args()
import numpy as np
mem = StateBuffer(args)
for i in xrange(args.loops):
mem.add(np.zeros((args.screen_height, args.screen_width)))
if i >= args.history_length:
state = mem.getState()
batch = mem.getStateMinibatch()
```
#### File: simple_dqn/src/statistics.py
```python
import sys
import csv
import time
import logging
import numpy as np
logger = logging.getLogger(__name__)
class Statistics:
def __init__(self, agent, net, mem, env, args):
self.agent = agent
self.net = net
self.mem = mem
self.env = env
self.agent.callback = self
self.net.callback = self
self.csv_name = args.csv_file
if self.csv_name:
logger.info("Results are written to %s" % args.csv_file)
self.csv_file = open(self.csv_name, "wb")
self.csv_writer = csv.writer(self.csv_file)
self.csv_writer.writerow((
"epoch",
"phase",
"steps",
"nr_games",
"average_reward",
"min_game_reward",
"max_game_reward",
"last_exploration_rate",
"total_train_steps",
"replay_memory_count",
"meanq",
"meancost",
"weight_updates",
"total_time",
"epoch_time",
"steps_per_second"
))
self.csv_file.flush()
self.start_time = time.clock()
self.validation_states = None
def reset(self):
self.epoch_start_time = time.clock()
self.num_steps = 0
self.num_games = 0
self.game_rewards = 0
self.average_reward = 0
self.min_game_reward = sys.maxint
self.max_game_reward = -sys.maxint - 1
self.last_exploration_rate = 1
self.average_cost = 0
# callback for agent
def on_step(self, action, reward, terminal, screen, exploration_rate):
self.game_rewards += reward
self.num_steps += 1
self.last_exploration_rate = exploration_rate
if terminal:
self.num_games += 1
self.average_reward += float(self.game_rewards - self.average_reward) / self.num_games
self.min_game_reward = min(self.min_game_reward, self.game_rewards)
self.max_game_reward = max(self.max_game_reward, self.game_rewards)
self.game_rewards = 0
def on_train(self, cost):
self.average_cost += (cost - self.average_cost) / self.net.train_iterations
def write(self, epoch, phase):
current_time = time.clock()
total_time = current_time - self.start_time
epoch_time = current_time - self.epoch_start_time
steps_per_second = self.num_steps / epoch_time
if self.num_games == 0:
self.num_games = 1
self.average_reward = self.game_rewards
if self.validation_states is None and self.mem.count > self.mem.batch_size:
# sample states for measuring Q-value dynamics
prestates, actions, rewards, poststates, terminals = self.mem.getMinibatch()
self.validation_states = prestates
if self.csv_name:
if self.validation_states is not None:
qvalues = self.net.predict(self.validation_states)
maxqs = np.max(qvalues, axis=1)
assert maxqs.shape[0] == qvalues.shape[0]
meanq = np.mean(maxqs)
else:
meanq = 0
self.csv_writer.writerow((
epoch,
phase,
self.num_steps,
self.num_games,
self.average_reward,
self.min_game_reward,
self.max_game_reward,
self.last_exploration_rate,
self.agent.total_train_steps,
self.mem.count,
meanq,
self.average_cost,
self.net.train_iterations,
total_time,
epoch_time,
steps_per_second
))
self.csv_file.flush()
logger.info(" num_games: %d, average_reward: %f, min_game_reward: %d, max_game_reward: %d" %
(self.num_games, self.average_reward, self.min_game_reward, self.max_game_reward))
logger.info(" last_exploration_rate: %f, epoch_time: %ds, steps_per_second: %d" %
(self.last_exploration_rate, epoch_time, steps_per_second))
def close(self):
if self.csv_name:
self.csv_file.close()
``` |
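The running averages in on_step() and on_train() use the incremental update mean += (x - mean) / n, which reproduces the ordinary arithmetic mean without storing all samples; a tiny check with arbitrary numbers:
```python
# Tiny illustration of the incremental mean update (values are arbitrary).
rewards = [4, 0, 8, 4]
mean = 0.0
for n, x in enumerate(rewards, start=1):
    mean += (x - mean) / n
assert mean == sum(rewards) / len(rewards)  # 4.0
```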
{
"source": "29chandu/Blog_django_graphql_api",
"score": 2
} |
#### File: Blog_django_graphql_api/blog/models.py
```python
from django.db import models
class Author(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return f'{self.name}'
class Post(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(max_length=256)
publish_date = models.DateField(auto_now_add=True)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
# author = models.CharField(max_length=200)
def __str__(self):
return self.title
class Comment(models.Model):
text = models.CharField(max_length=150)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
# author = models.CharField(max_length=200)
def __str__(self):
return f'{self.text[:15]}... {self.author}'
``` |
{
"source": "29next/next-theme-kit",
"score": 3
} |
#### File: next-theme-kit/ntk/decorator.py
```python
import functools
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
def parser_config(*args, **kwargs):
"""Decorator for parser config values from command arguments."""
def _decorator(func):
@functools.wraps(func)
def _wrapper(self, parser, **func_kwargs):
for name, value in kwargs.items():
setattr(self.config, name, value)
self.config.parser_config(parser, write_file=kwargs.get('write_file', False))
self.gateway.store = self.config.store
self.gateway.apikey = self.config.apikey
func(self, parser, **func_kwargs)
return _wrapper
return _decorator
def check_error(error_format='{error_default} -> {error_msg}', response_json=True, **kwargs):
"""Decorator for check response error from request API"""
def _decorator(func):
@functools.wraps(func)
def _wrapper(self, *func_args, **func_kwargs):
response = func(self, *func_args, **func_kwargs)
error_default = f'{func.__name__.capitalize().replace("_", " ")} of {self.store} failed.'
error_msg = ""
if response.ok and not response_json:
return response
elif response.ok and response.headers.get('content-type') == 'application/json':
return response
elif response.headers.get('content-type') == 'application/json':
result = response.json()
error_msg = " -> "
for key, value in result.items():
if type(value) == list:
error_msg += f'"{key}" : {" ".join(value)}'
else:
error_msg += value
error_log = error_format.format(
**vars(self), **func_kwargs, error_default=error_default, error_msg=error_msg
)
logging.info(f'{error_log}')
return response
return _wrapper
return _decorator
```
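A minimal sketch of how these decorators might be applied to a gateway method; the Gateway class, the endpoint URL and the auth header below are assumptions for illustration, not the package's actual API.
```python
# Hypothetical example: class name, endpoint and auth header are assumptions.
import requests

from ntk.decorator import check_error


class Gateway:
    def __init__(self, store, apikey):
        self.store = store
        self.apikey = apikey

    @check_error(error_format='Pulling theme {theme_id} from {store} failed.{error_msg}')
    def get_theme(self, theme_id=None):
        # On failure, check_error formats the message from vars(self), the
        # keyword arguments and the JSON error body, then logs it.
        return requests.get(
            f'{self.store}/api/themes/{theme_id}/',
            headers={'Authorization': f'Token {self.apikey}'},
        )

# theme_id must be passed as a keyword so it reaches the format string, e.g.
# Gateway('https://example.com', 'secret').get_theme(theme_id=42)
```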
#### File: next-theme-kit/tests/test_config.py
```python
import unittest
from unittest.mock import MagicMock, mock_open, patch
from ntk.conf import Config
class TestConfig(unittest.TestCase):
def setUp(self):
config = {
'env': 'development',
'apikey': '<KEY>',
'store': 'http://simple.com',
'theme_id': 1
}
self.config = Config(**config)
@patch("yaml.load", autospec=True)
@patch("os.path.exists", autospec=True)
def test_read_config_file_with_config_file_should_be_read_data_correctly(self, mock_patch_exists, mock_load_yaml):
mock_patch_exists.return_value = True
mock_load_yaml.return_value = {
'development': {
'apikey': '<KEY>',
'store': 'http://example.com',
'theme_id': 1234
}
}
with patch('builtins.open', mock_open(read_data='yaml data')):
self.config.read_config()
self.assertEqual(self.config.apikey, '<KEY>')
self.assertEqual(self.config.store, 'http://example.com')
self.assertEqual(self.config.theme_id, 1234)
@patch("yaml.dump", autospec=True)
@patch("yaml.load", autospec=True)
@patch("os.path.exists", autospec=True)
def test_write_config_file_without_config_file_should_write_data_correctly(
self, mock_patch_exists, mock_load_yaml, mock_dump_yaml
):
mock_patch_exists.return_value = True
mock_dump_yaml.return_value = 'yaml data'
mock_load_yaml.return_value = {
'sandbox': {
'apikey': '<KEY>',
'store': 'http://sandbox.com',
'theme_id': 5678,
'sass': {
'output_style': 'nested'
}
}
}
self.config.apikey = '<KEY>'
self.config.store = 'http://example.com'
self.config.theme_id = 1234
self.config.sass_output_style = 'nested'
config = {
'sandbox': {
'apikey': '<KEY>',
'store': 'http://sandbox.com',
'theme_id': 5678,
'sass': {
'output_style': 'nested'
}
},
'development': {
'apikey': '<KEY>',
'store': 'http://example.com',
'theme_id': 1234,
'sass': {
'output_style': 'nested'
}
}
}
with patch('builtins.open', mock_open()):
with open('config.yml') as f:
self.config.write_config()
mock_dump_yaml.assert_called_once_with(config, f)
def test_validate_config_should_raise_expected_error(self):
with self.assertRaises(TypeError) as error:
self.config.apikey = None
self.config.store = 'http://example.com'
self.config.theme_id = 1234
self.config.validate_config()
self.assertEqual(str(error.exception), '[development] argument -a/--apikey is required.')
with self.assertRaises(TypeError) as error:
self.config.apikey = '<KEY>'
self.config.store = None
self.config.theme_id = 1234
self.config.validate_config()
self.assertEqual(str(error.exception), '[development] argument -s/--store is required.')
with self.assertRaises(TypeError) as error:
self.config.apikey = '<KEY>'
self.config.store = 'http://example.com'
self.config.theme_id = None
self.config.validate_config()
self.assertEqual(str(error.exception), '[development] argument -t/--theme_id is required.')
self.config.apikey = None
self.config.store = None
self.config.theme_id = None
with self.assertRaises(TypeError) as error:
self.config.validate_config()
self.assertEqual(str(error.exception),
'[development] argument -a/--apikey, -s/--store, -t/--theme_id are required.')
with self.assertRaises(TypeError) as error:
self.config.apikey_required = True
self.config.store_required = True
self.config.theme_id_required = False
self.config.validate_config()
self.assertEqual(str(error.exception), '[development] argument -a/--apikey, -s/--store are required.')
with self.assertRaises(TypeError) as error:
self.config.apikey = '<KEY>'
self.config.store = 'http://example.com'
self.config.sass_output_style = 'abc'
self.config.validate_config()
self.assertEqual(
str(error.exception),
(
'[development] argument -sos/--sass_output_style is unsupported '
'output_style; choose one of nested, expanded, compact, and compressed'
)
)
def test_save_config_should_validate_and_write_config_correctly(self):
with patch("ntk.conf.Config.write_config") as mock_write_config:
with patch("ntk.conf.Config.validate_config") as mock_validate_config:
self.config.save()
mock_validate_config.assert_called_once()
mock_write_config.assert_called_once()
with patch("ntk.conf.Config.write_config") as mock_write_config:
with patch("ntk.conf.Config.validate_config") as mock_validate_config:
self.config.save(write_file=False)
mock_validate_config.assert_called_once()
mock_write_config.assert_not_called()
def test_parser_config_should_set_config_config_correctly(self):
config = {
'env': 'sandbox',
'apikey': '<KEY>',
'store': 'http://sandbox.com',
'theme_id': 1234,
'sass_output_style': 'nested'
}
parser = MagicMock(**config)
with patch("ntk.conf.Config.write_config") as mock_write_config:
self.config.parser_config(parser=parser)
self.assertEqual(self.config.apikey, '<KEY>')
self.assertEqual(self.config.store, 'http://sandbox.com')
self.assertEqual(self.config.theme_id, 1234)
self.assertEqual(self.config.sass_output_style, 'nested')
mock_write_config.assert_not_called()
with patch("ntk.conf.Config.write_config") as mock_write_config:
self.config.parser_config(parser=parser, write_file=True)
self.assertEqual(self.config.apikey, '<KEY>')
self.assertEqual(self.config.store, 'http://sandbox.com')
self.assertEqual(self.config.theme_id, 1234)
self.assertEqual(self.config.sass_output_style, 'nested')
mock_write_config.assert_called_once()
``` |
{
"source": "29riyasaxena/MDF",
"score": 3
} |
#### File: examples/MDF/abcd_torch.py
```python
import sys
import torch
from torch import nn
import numpy as np
import abcd_python as abcd
in_size = 1
out_size = 1
#### A
A = nn.Linear(in_size, out_size)
with torch.no_grad():
A.weight[0][0] = abcd.A_slope
A.bias[0] = abcd.A_intercept
#### B
class MyLogistic(nn.Module):
def __init__(self, gain, bias, offset):
super().__init__()
self.gain = gain
self.bias = bias
self.offset = offset
def forward(self, input: torch.Tensor):
return 1 / (1 + torch.exp(-1 * self.gain * (input + self.bias) + self.offset))
B = MyLogistic(abcd.B_gain, abcd.B_bias, abcd.B_offset)
#### C
class MyExp(nn.Module):
def __init__(self, scale, rate, bias, offset):
super().__init__()
self.scale = scale
self.rate = rate
self.bias = bias
self.offset = offset
def forward(self, input: torch.Tensor):
return self.scale * torch.exp((self.rate * input) + self.bias) + self.offset
C = MyExp(abcd.C_scale, abcd.C_rate, abcd.C_bias, abcd.C_offset)
#### D
class MySin(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, input: torch.Tensor):
return self.scale * torch.sin(input)
D = MySin(abcd.D_scale)
m_a = nn.Sequential(A)
m_ab = nn.Sequential(A, B)
m_abc = nn.Sequential(A, B, C)
m_abcd = nn.Sequential(A, B, C, D)
print("Model: %s" % m_abcd)
# print(dir(m))
for i in abcd.test_values:
input = torch.ones(in_size) * i
output_a = m_a(input)
output_ab = m_ab(input)
output_abc = m_abc(input)
output_abcd = m_abcd(input)
print(
f"Output calculated by pytorch (input {input}) - A={'%f'%output_a}\tB={'%f'%output_ab}\tC={'%f'%output_abc}\tD={'%f'%output_abcd}\t"
)
# Export the model
fn = "ABCD_from_torch.onnx"
torch_out = torch.onnx._export(
m_abcd, # model being run
input, # model input (or a tuple for multiple inputs)
fn, # where to save the model (can be a file or file-like object)
export_params=True,
) # store the trained parameter weights inside the model file
print("Done! Exported to: %s" % fn)
import onnx
onnx_model = onnx.load(fn)
# print('Model: %s'%onnx_model)
def info(a):
print(f"Info: {a.name} ({a.type}), {a.shape}")
import onnxruntime as rt
sess = rt.InferenceSession(fn)
info(sess.get_inputs()[0])
info(sess.get_outputs()[0])
for i in abcd.test_values:
x = np.array([i], np.float32)
res = sess.run([sess.get_outputs()[0].name], {sess.get_inputs()[0].name: x})
print(f"Output calculated by onnxruntime (input: {x}): {res}")
print("Done! ONNX inference")
```
#### File: examples/MDF/states.py
```python
from modeci_mdf.mdf import *
import sys
def main():
mod = Model(id="States")
mod_graph = Graph(id="state_example")
mod.graphs.append(mod_graph)
## Counter node
counter_node = Node(id="counter_node")
p1 = Parameter(id="increment", value=1)
counter_node.parameters.append(p1)
p2 = Parameter(id="count", value="count + increment")
counter_node.parameters.append(p2)
op1 = OutputPort(id="out_port", value=p2.id)
counter_node.output_ports.append(op1)
mod_graph.nodes.append(counter_node)
## Sine node...
sine_node = Node(id="sine_node")
sine_node.parameters.append(Parameter(id="amp", value=3))
sine_node.parameters.append(Parameter(id="period", value=0.4))
s1 = Parameter(
id="level", default_initial_value=0, time_derivative="6.283185 * rate / period"
)
sine_node.parameters.append(s1)
s2 = Parameter(
id="rate",
default_initial_value=1,
time_derivative="-1 * 6.283185 * level / period",
)
sine_node.parameters.append(s2)
op1 = OutputPort(id="out_port", value="amp * level")
sine_node.output_ports.append(op1)
mod_graph.nodes.append(sine_node)
new_file = mod.to_json_file("%s.json" % mod.id)
new_file = mod.to_yaml_file("%s.yaml" % mod.id)
if "-run" in sys.argv:
verbose = True
# verbose = False
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.execution_engine import EvaluableGraph
eg = EvaluableGraph(mod_graph, verbose)
dt = 0.01
duration = 2
t = 0
recorded = {}
times = []
s = []
while t <= duration:
times.append(t)
print("====== Evaluating at t = %s ======" % (t))
if t == 0:
eg.evaluate() # replace with initialize?
else:
eg.evaluate(time_increment=dt)
s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].curr_value)
t += dt
if "-nogui" not in sys.argv:
import matplotlib.pyplot as plt
plt.plot(times, s)
plt.show()
if "-graph" in sys.argv:
mod.to_graph_image(
engine="dot",
output_format="png",
view_on_render=False,
level=3,
filename_root="states",
only_warn_on_fail=True, # Makes sure test of this doesn't fail on Windows on GitHub Actions
)
return mod_graph
if __name__ == "__main__":
main()
```
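The two stateful parameters of sine_node form a harmonic oscillator, d(level)/dt = 2*pi*rate/period and d(rate)/dt = -2*pi*level/period, so amp * level traces an approximate sine wave with the requested period; a standalone sketch of the same update using forward-Euler steps:
```python
# Standalone illustration (no MDF involved); forward Euler is only approximate.
import math

amp, period, dt, duration = 3, 0.4, 0.01, 2
level, rate = 0.0, 1.0  # the default_initial_value of each parameter
outs = []

for step in range(int(duration / dt) + 1):
    outs.append(amp * level)  # what sine_node's out_port reports each step
    d_level = 2 * math.pi * rate / period
    d_rate = -2 * math.pi * level / period
    level += d_level * dt
    rate += d_rate * dt
```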
#### File: examples/PyTorch/run_translated_mlp_pure_mdf.py
```python
import json
import ntpath
from modeci_mdf.functions.standard import mdf_functions, create_python_expression
from typing import List, Tuple, Dict, Optional, Set, Any, Union
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.mdf import *
from modeci_mdf.full_translator import *
from modeci_mdf.execution_engine import EvaluableGraph
import argparse
import sys
import numpy as np
import sys
import h5py
import time
def main():
verbose = True
dt = 5e-05
file_path = "mlp_pure_mdf.json"
data = convert_states_to_stateful_parameters(file_path, dt)
# print(data)
with open("Translated_" + file_path, "w") as fp:
json.dump(data, fp, indent=4)
test_all = "-test" in sys.argv
mod_graph = load_mdf("Translated_%s" % file_path).graphs[0]
# mdf_to_graphviz(mod_graph,view_on_render=not test_all, level=3)
from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW
format = FORMAT_TENSORFLOW if "-tf" in sys.argv else FORMAT_NUMPY
eg = EvaluableGraph(mod_graph, verbose=False)
eg.evaluate(array_format=format)
print("Finished evaluating graph using array format %s" % format)
for n in [
"mlp_input_layer",
"mlp_relu_1",
"mlp_hidden_layer_with_relu",
"mlp_output_layer",
]:
out = eg.enodes[n].evaluable_outputs["out_port"].curr_value
print(f"Final output value of node {n}: {out}, shape: {out.shape}")
if "-graph" in sys.argv:
mod.to_graph_image(
engine="dot",
output_format="png",
view_on_render=False,
level=2,
filename_root="mlp_pure_mdf",
only_warn_on_fail=True, # Makes sure test of this doesn't fail on Windows on GitHub Actions
)
if test_all:
# Iterate on training data, feed forward and log accuracy
imgs = np.load("example_data/imgs.npy")
labels = np.load("example_data/labels.npy")
import torch.nn
matches = 0
imgs_to_test = imgs[:300]
start = time.time()
for i in range(len(imgs_to_test)):
ii = imgs[i, :, :]
target = labels[i]
img = torch.Tensor(ii).view(-1, 14 * 14).numpy()
# plot_img(img, 'Post_%i (%s)'%(i, img.shape))
print(
"***********\nTesting image %i (label: %s): %s\n%s"
% (i, target, np.array2string(img, threshold=5, edgeitems=2), img.shape)
)
# print(mod_graph.nodes[0].parameters['input'])
mod_graph.nodes[0].get_parameter("input").value = img
eg = EvaluableGraph(mod_graph, verbose=False)
eg.evaluate(array_format=format)
for n in ["mlp_output_layer"]:
out = eg.enodes[n].evaluable_outputs["out_port"].curr_value
print(
"Output of evaluated graph: %s %s (%s)"
% (out, out.shape, type(out).__name__)
)
prediction = np.argmax(out)
match = target == int(prediction)
if match:
matches += 1
print(f"Target: {target}, prediction: {prediction}, match: {match}")
t = time.time() - start
print(
"Matches: %i/%i, accuracy: %s%%. Total time: %.4f sec (%.4fs per run)"
% (
matches,
len(imgs_to_test),
(100.0 * matches) / len(imgs_to_test),
t,
t / len(imgs_to_test),
)
)
if __name__ == "__main__":
main()
```
#### File: actr/ccm/pattern.py
```python
import re
basestring = str
class PatternException(Exception):
pass
def get(obj, name, key):
if name is None:
a = obj
else:
a = obj[name]
while type(key) == str and "." in key:
key1, key = key.split(".", 1)
try:
a = a[key1]
except AttributeError:
a = getattr(a, key1)
try:
x = a[key]
except AttributeError:
x = getattr(a, key)
if isinstance(x, float):
x = "%g" % x
if not isinstance(x, str):
x = repr(x)
return x
def partialmatch(obj, name, key, b, value):
if type(key) == str and key[0] == "?":
key = b[key[1:]]
v = get(obj, name, key)
if v == value:
return True
# fix for early Python versions where True and False are actually 1 and 0
if value in ["True", "False"] and type(True) == int:
if v == str(bool(value)):
return True
pm = b.get("_partial", None)
if pm is not None:
x = pm.match(key, value, v)
obj._partial += x
return True
else:
return False
class Pattern:
def __init__(self, patterns, bound=None, partial=None):
self.funcs = parse(patterns, bound)
self.partial = partial
def match(self, obj):
b = {}
b["_partial"] = self.partial
if self.partial is not None:
obj._partial = 0.0
try:
for f in self.funcs:
if f(obj, b) == False:
return None
except (AttributeError, TypeError, KeyError):
return None
del b["_partial"]
return b
def parse(patterns, bound=None):
if not hasattr(patterns, "items"):
patterns = {None: patterns}
funcs = []
vars = {}
funcs2 = []
for name, pattern in patterns.items():
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
for p in pattern:
if p is None:
if name is None:
funcs.append(lambda x, b: x == None)
else:
funcs.append(
lambda x, b, name=name: x[name] == None or len(x[name]) == 0
)
elif callable(p):
if name is None:
def callfunc(x, b, name=name, p=p):
return p(x, b)
else:
def callfunc(x, b, name=name, p=p):
return p(x[name], b)
funcs2.append(callfunc)
elif isinstance(p, basestring):
namedSlots = False
for j, text in enumerate(p.split()):
key = j
m = re.match(r"([?]?[\w\.]+):", text)
if m != None:
key = m.group(1)
try:
key = int(key)
except ValueError:
pass
text = text[m.end() :]
if len(text) == 0:
raise PatternException(
"No value for slot '%s' in pattern '%s'"
% (key, pattern)
)
namedSlots = True
else:
if namedSlots != False:
raise PatternException(
"Found unnamed slot '%s' after named slot in pattern '%s'"
% (text, pattern)
)
if text == "?":
continue
while len(text) > 0:
m = re.match(r"([\w\.-]+)", text)
if m != None:
text = text[m.end() :]
t = m.group(1)
funcs.append(
lambda x, b, name=name, key=key, t=t: partialmatch(
x, name, key, b, t
)
)
continue
m = re.match(r"!([\w\.-]+)", text)
if m != None:
text = text[m.end() :]
t = m.group(1)
funcs.append(
lambda x, b, name=name, key=key, t=t: get(x, name, key)
!= t
)
continue
m = re.match(r"\?(\w+)", text)
if m != None:
text = text[m.end() :]
v = m.group(1)
if bound is not None and v in bound:
funcs.append(
lambda x, b, name=name, key=key, t=bound[
v
]: partialmatch(x, name, key, b, t)
)
elif v in vars:
funcs2.append(
lambda x, b, name=name, key=key, v=v: partialmatch(
x, name, key, b, b[v]
)
)
else:
vars[v] = (name, key)
def setfunc(x, b, name=name, key=key, v=v):
b[v] = get(x, name, key)
return True
funcs.append(setfunc)
continue
m = re.match(r"!\?(\w+)", text)
if m != None:
text = text[m.end() :]
v = m.group(1)
if bound is not None and v in bound:
funcs.append(
lambda x, b, name=name, key=key, t=bound[v]: get(
x, name, key
)
!= t
)
else:
funcs2.append(
lambda x, b, name=name, key=key, v=v: get(
x, name, key
)
!= b[v]
)
continue
raise PatternException(
f"Unknown text '{text}' in pattern '{pattern}'"
)
return funcs + funcs2
```
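A small sketch of the slot syntax the parser above accepts, matched against a plain nested dict; the chunk contents and the import path are made up for illustration.
```python
# Hypothetical usage; the chunk contents are invented.
from ccm.pattern import Pattern  # assumed import path

chunk = {'goal': {'isa': 'count', 'current': '3', 'target': '5'}}

# a literal slot value plus a ?variable binding
print(Pattern({'goal': 'isa:count current:?x'}).match(chunk))  # {'x': '3'}

# a negated literal: matches while 'current' is anything other than '5'
print(Pattern({'goal': 'current:!5'}).match(chunk))  # {} (non-None means it matched)

# a failed match returns None
print(Pattern({'goal': 'isa:retrieve'}).match(chunk))  # None
```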
#### File: interfaces/onnx/importer.py
```python
import typing
import onnx
from onnx import (
ModelProto,
TensorProto,
GraphProto,
AttributeProto,
numpy_helper,
shape_inference,
)
from onnx.defs import get_schema
from modeci_mdf.mdf import *
def id_to_port(id: str):
"""Turn unique ONNX output and input value names into valid MDF input and outport names"""
new_name = str(id).replace(".", "_")
# If the first character is a digit, precede with an underscore so this can never be interpreted
# as number down the line.
if new_name[0].isdigit():
new_name = "_" + new_name
return new_name
def get_shape_params(shape: onnx.TensorShapeProto) -> typing.Tuple:
"""
Small helper function to extract a tuple from the TensorShapeProto. These objects
can contain both integer dimensions and parameter dimensions that are variable, like
'batch_size'.
Args:
shape: The ONNX shape proto to process.
Returns:
A tuple that can contain both integers and strings for parameter dimensions.
"""
shape = tuple(d.dim_param if d.dim_param != "" else d.dim_value for d in shape.dim)
# If shape is empty tuple, its a scalar, make it size 1
if len(shape) == 0:
shape = (1,)
return shape
def get_onnx_attribute(a):
# Use the helpers to get the appropriate value
val = onnx.helper.get_attribute_value(a)
# get_attribute_value() can return TensorProto's, lets convert them to a list for JSON
# FIXME: This begs the question, is JSON a good format for storing large tensors (nope)
if type(val) == TensorProto:
return numpy_helper.to_array(val).tolist()
else:
return val
def onnx_node_to_mdf(
node: typing.Union[onnx.NodeProto, onnx.ValueInfoProto],
onnx_initializer: typing.Dict[str, typing.Dict[str, typing.Any]],
) -> Node:
"""
Construct an MDF node (and function) from an ONNX NodeProto or ValueInfoProto
Args:
node: The ONNX node to use to form the MDF node. Can be a node from the model or
a ValueInfoProto specifying an input or output.
onnx_initializer: A specification of values in the graph that ONNX has
marked as initializer's. This dict is keyed on the name of the parameter,
the value is another dict with three entries; shape, type, and value.
Returns:
The equivalent MDF node for the ONNX node passed in as argument.
"""
# If this is a ONNX Node,
if type(node) == onnx.NodeProto:
# Create and MDF node with parameters
# FIXME: We need to preserve type info somewhere
params_dict = {a.name: get_onnx_attribute(a) for a in node.attribute}
# For any attributes that are sub-graphs, we need to recurse
for aname, val in params_dict.items():
if type(val) == GraphProto:
params_dict[aname] = onnx_to_mdf(val, onnx_initializer=onnx_initializer)
# If we have we have value constants that feed into this node. Make them parameters
# instead of input ports
non_constant_inputs = []
func_args = {}
for inp_i, inp in enumerate(node.input):
# Get the name of the formal argument that corresponds to this input.
# We need to go to the schema for this.
# FIXME: We need to make sure we are going the correct schema here ... yuck!
try:
arg_name = get_schema(node.op_type).inputs[inp_i].name
except IndexError:
arg_name = f"arg_{inp}"
if inp in onnx_initializer and "value" in onnx_initializer[inp]:
params_dict[arg_name] = onnx_initializer[inp]["value"]
func_args[arg_name] = arg_name
else:
non_constant_inputs.append(inp)
func_args[arg_name] = id_to_port(inp)
# FIXME: parameters must be set or we get JSON serialization error later
mdf_node = Node(id=node.name)
for p in params_dict:
if type(params_dict[p]) == Graph:
mdf_node.parameters.append(
Parameter(
id=p, value={"graph_%s" % params_dict[p].id: params_dict[p]}
)
)
else:
mdf_node.parameters.append(Parameter(id=p, value=params_dict[p]))
# Add the function
# FIXME: There is probably more stuff we need to preserve for ONNX Ops
func = Parameter(id=node.name, function=f"onnx::{node.op_type}", args=func_args)
mdf_node.parameters.append(func)
# Recreate inputs and outputs of ONNX node as InputPorts and OutputPorts
for inp in non_constant_inputs:
param_info = onnx_initializer.get(inp, None)
shape = param_info["shape"] if param_info else ""
ip = InputPort(id=id_to_port(inp), shape=shape)
mdf_node.input_ports.append(ip)
for out in node.output:
op = OutputPort(id=id_to_port(out), value=func.get_id())
mdf_node.output_ports.append(op)
elif type(node) == onnx.ValueInfoProto:
raise NotImplementedError()
# # Lets start with an MDF node that uses the ONNX node name as its id. No parameters
# mdf_node = Node(id=node.name)
#
# # This is an input or output node. No Op\Function or parameters. This is just
# # a simple pass through node with an input and output port with the correct
# # shape.
# # FIXME: Should this be necessary? ONNX treats input and output nodes as simple named values.
# ip1 = InputPort(id=f"in_port",
# shape=str(get_shape_params(node.type.tensor_type.shape))) # FIXME: Why string?
# mdf_node.input_ports.append(ip1)
# op1 = OutputPort(id=node.name)
# op1.value = f"in_port"
# mdf_node.output_ports.append(op1)
return mdf_node
def onnx_to_mdf(
onnx_model: typing.Union[ModelProto, GraphProto],
onnx_initializer: typing.Dict[str, typing.Dict[str, typing.Any]] = None,
):
"""
Convert a loaded ONNX model into a MDF model.
Args:
onnx_model: The ONNX model to convert. Typically, this is the result of a call to onnx.load()
onnx_initializer: A specification of values in the graph that ONNX has
marked as initializer's. This dict is keyed on the name of the parameter,
the value is another dict with three entries; shape, type, and value.
Returns:
An MDF description of the ONNX model.
"""
if onnx_initializer is None:
onnx_initializer = {}
if type(onnx_model) == ModelProto:
# Do shape inference on the model so we can get shapes of intermediate outputs
# FIXME: This function has side-effects, it probably shouldn't
try:
onnx_model = shape_inference.infer_shapes(onnx_model)
except RuntimeError:
pass
graph = onnx_model.graph
else:
graph = onnx_model
# Get all the nodes in the onnx model, even the inputs and outputs
onnx_nodes = list(graph.node)
if hasattr(graph, "initializer"):
# Parameters that have been initialized with values.
# FIXME: We need a cleaner way to extract this info.
onnx_initializer_t = {}
for t in graph.initializer:
t_np = numpy_helper.to_array(t)
onnx_initializer_t[t.name] = {"shape": t_np.shape, "type": str(t_np.dtype)}
# And the input and intermediate node shapes as well
for vinfo in list(graph.input) + list(graph.value_info):
vshape = get_shape_params(vinfo.type.tensor_type.shape)
try:
vtype = onnx.helper.printable_type(vinfo.type)
except AssertionError:
# Couldn't extract type
vtype = None
onnx_initializer_t[vinfo.name] = {"shape": vshape, "type": vtype}
onnx_initializer = {**onnx_initializer, **onnx_initializer_t}
# Finally, some nodes are constants, extract the values and drop the nodes.
# They will be removed in the MDF and passed as named parameters to the Node
constants = {}
onnx_nodes_nc = []
for onnx_node in onnx_nodes:
if onnx_node.op_type == "Constant":
v = get_onnx_attribute(onnx_node.attribute[0])
constants[onnx_node.output[0]] = {
"shape": v.shape if hasattr(v, "shape") else "(1,)",
"type": str(v.dtype) if hasattr(v, "dtype") else str(type(v)),
"value": v,
}
else:
onnx_nodes_nc.append(onnx_node)
onnx_nodes = onnx_nodes_nc
# Add constants to the initializer dict
onnx_initializer = {**onnx_initializer, **constants}
mod_graph = Graph(id=graph.name)
# Construct the equivalent nodes in MDF
mdf_nodes = [
onnx_node_to_mdf(node=node, onnx_initializer=onnx_initializer)
for node in onnx_nodes
]
mod_graph.nodes.extend(mdf_nodes)
# Construct the edges, we will do this by going through all the nodes.
node_pairs = list(zip(onnx_nodes, mod_graph.nodes))
for onnx_node, mdf_node in node_pairs:
if len(onnx_node.output) > 0:
for i, out in enumerate(onnx_node.output):
out_port_id = mdf_node.output_ports[i].id
# Find all node input ports with this outport id
# FIXME: This is slow for big graphs with lots of edges. Best to build a data structure for this.
receiver = [
(m, ip)
for n, m in node_pairs
for ip in m.input_ports
if out_port_id == ip.id
]
# Make an edge for each receiver of this output port
for receiver_node, receiver_port in receiver:
edge = Edge(
id=f"{mdf_node.id}.{out_port_id}_{receiver_node.id}.{receiver_port.id}",
sender=mdf_node.id,
sender_port=out_port_id,
receiver=receiver_node.id,
receiver_port=receiver_port.id,
)
mod_graph.edges.append(edge)
# If they passed an ONNX model, wrap the graph in a MDF model
if type(onnx_model) == ModelProto:
mod = Model(id="ONNX Model")
mod.graphs.append(mod_graph)
return mod
else:
return mod_graph
def find_subgraphs(
graph: onnx.GraphProto, graph_dict: typing.Dict[str, GraphProto] = None
) -> typing.Dict[str, GraphProto]:
"""
Recurse through an ONNX graph and find all subgraphs.
Args:
graph: The graph to search.
        graph_dict: Insert graphs we find into this dict. Use the parent node name as a key.
            If None, initialize to an empty dict.
Returns:
All the subgraphs in the for the graph.
"""
if graph_dict is None:
graph_dict = {}
for node in graph.node:
for ai, attr in enumerate(node.attribute):
if attr.type == AttributeProto.GRAPH:
subgraph = onnx.helper.get_attribute_value(attr)
graph_dict[f"{node.name}_attr{ai}"] = subgraph
graph_dict = find_subgraphs(subgraph, graph_dict)
elif attr.type == AttributeProto.GRAPHS:
subgraphs = onnx.helper.get_attribute_value(attr)
for gi, subgraph in enumerate(subgraphs):
graph_dict[f"{node.name}_attr{ai}_g{gi}"] = subgraph
graph_dict = find_subgraphs(subgraph, graph_dict)
return graph_dict
def convert_file(input_file: str):
"""
Simple converter from ONNX to MDF. Takes in ONNX files and generates MDF JSON/YAML files.
Args:
input_file: The input file path to the ONNX file. Output files are generated in same
directory with -mdf.json and -mdf.yml extensions.
Returns:
        None
"""
import os
out_filename = f"{os.path.splitext(input_file)[0]}-mdf"
onnx_model = onnx.load(input_file)
onnx.checker.check_model(onnx_model)
mdf_model = onnx_to_mdf(onnx_model)
mdf_model.to_json_file(f"{out_filename}.json")
mdf_model.to_yaml_file(f"{out_filename}.yaml")
def main():
import argparse
parser = argparse.ArgumentParser(
description="Simple converter from ONNX to MDF. "
"Takes in ONNX files and generates MDF JSON/YAML"
)
parser = argparse.ArgumentParser()
parser.add_argument(
"input_file",
type=str,
help="An input ONNX file. "
"Output files are generated in same directory "
"with -mdf.json and -mdf.yml extensions.",
)
args = parser.parse_args()
convert_file(args.input_file)
if __name__ == "__main__":
main()
```
#### File: interfaces/pytorch/mod_torch_builtins.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class argmax(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.argmax(A)
class argmin(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.argmin(A)
class matmul(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.matmul(A, B.T)
class add(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.add(A, B)
class sin(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.sin(A)
class cos(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.cos(A)
class abs(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.abs(A)
class flatten(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.reshape(A, (1, -1))
class clip(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, min_val, max_val):
return torch.clamp(A, min_val, max_val)
class shape(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.tensor(A.size()).to(torch.int64)
class det(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.det(A)
class And(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.logical_and(A > 0, B > 0)
class Or(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.logical_or(A > 0, B > 0)
class Xor(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.logical_xor(A > 0, B > 0)
class concat(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, axis=0):
return torch.cat(A, axis)
class ceil(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.ceil(A)
class floor(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.floor(A)
class bitshift(torch.nn.Module):
def __init__(self, DIR):
super().__init__()
self.dir = DIR
def forward(self, A, B):
if self.dir == "RIGHT":
return A.to(torch.int64) >> B.to(torch.int64)
else:
return A.to(torch.int64) << B.to(torch.int64)
class conv(torch.nn.Module):
def __init__(
self,
auto_pad="NOTSET",
kernel_shape=None,
group=1,
strides=[1, 1],
dilations=[1, 1],
pads=[0, 0, 0, 0],
):
super().__init__()
self.group = group
self.auto_pad = auto_pad
self.strides = tuple(strides)
self.dilations = tuple(dilations)
        self.kernel_shape = kernel_shape
        # keep the explicit pads so forward() can use them when auto_pad == "NOTSET"
        self.pads = tuple(pads)
def forward(self, A, W, B=None):
if self.auto_pad == "NOTSET":
self.pads = tuple(pads)
elif self.auto_pad == "VALID":
self.pads = (0, 0, 0, 0)
elif self.auto_pad == "SAME_UPPER":
pad_dim1 = (
                torch.ceil(torch.tensor(A.shape[2]).to(torch.float32) / self.strides[0])
.to(torch.int64)
.item()
)
pad_dim2 = (
                torch.ceil(torch.tensor(A.shape[3]).to(torch.float32) / self.strides[1])
.to(torch.int64)
.item()
)
if pad_dim1 % 2 == 0 and pad_dim2 % 2 == 0:
self.pads = (pad_dim1 // 2, pad_dim1 // 2, pad_dim2 // 2, pad_dim2 // 2)
elif pad_dim1 % 2 == 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2,
pad_dim2 // 2,
pad_dim2 // 2 + 1,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 == 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2 + 1,
pad_dim2 // 2,
pad_dim2 // 2,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2 + 1,
pad_dim2 // 2,
pad_dim2 // 2 + 1,
)
elif self.auto_pad == "SAME_LOWER":
pad_dim1 = (
                torch.ceil(torch.tensor(A.shape[2]).to(torch.float32) / self.strides[0])
.to(torch.int64)
.item()
)
pad_dim2 = (
                torch.ceil(torch.tensor(A.shape[3]).to(torch.float32) / self.strides[1])
.to(torch.int64)
.item()
)
if pad_dim1 % 2 == 0 and pad_dim2 % 2 == 0:
self.pads = (pad_dim1 // 2, pad_dim1 // 2, pad_dim2 // 2, pad_dim2 // 2)
elif pad_dim1 % 2 == 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2,
pad_dim2 // 2 + 1,
pad_dim2 // 2,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 == 0:
self.pads = (
pad_dim1 // 2 + 1,
pad_dim1 // 2,
pad_dim2 // 2,
                    pad_dim2 // 2,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2 + 1,
pad_dim1 // 2,
pad_dim2 // 2 + 1,
pad_dim2 // 2,
)
A = F.pad(A, self.pads)
return F.conv2d(
A,
W,
bias=B,
stride=self.strides,
            padding=0,  # the input has already been padded explicitly with F.pad above
dilation=self.dilations,
groups=self.group,
)
class elu(torch.nn.Module):
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def forward(self, A):
return nn.ELU(alpha=self.alpha)(A.to(torch.float32))
class hardsigmoid(torch.nn.Module):
def __init__(self, alpha=0.2, beta=0.5):
super().__init__()
self.alpha = alpha
self.beta = beta
def forward(self, A):
return torch.clamp(self.alpha * (A.to(torch.float32)) + self.beta, 0, 1)
class hardswish(torch.nn.Module):
def __init__(self):
super().__init__()
self.alpha = 1.0 / 6
self.beta = 0.5
def forward(self, A):
return A * torch.clamp(self.alpha * (A.to(torch.float32)) + self.beta, 0, 1)
class hardmax(torch.nn.Module):
def __init__(self, axis=-1):
super().__init__()
self.axis = axis
def forward(self, A):
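        # Outline of this Hardmax emulation (comments added for clarity):
        #   1. build an index tensor labelling positions 0..rank[axis]-1 along `axis`
        #      and expand it to the shape of A,
        #   2. overwrite the index of every non-maximal element with rank[axis],
        #      a sentinel larger than any valid index,
        #   3. take the minimum remaining index along `axis`, so ties resolve to the
        #      first occurrence of the maximum,
        #   4. one-hot encode that index to form the output.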
A = A.to(torch.float32)
rank = A.shape
if self.axis < 0:
self.axis += len(rank)
tensor = torch.arange(rank[self.axis])
repeats = []
repeats.append(1)
for i, idx in enumerate(reversed(rank[: self.axis])):
repeats.append(1)
tensor = torch.stack([tensor] * idx)
for i, idx in enumerate(rank[self.axis + 1 :]):
repeats.append(idx)
tensor = tensor.unsqueeze(-1).repeat(repeats)
repeats[-1] = 1
max_values, _ = torch.max(A, dim=self.axis)
tensor[A != torch.unsqueeze(max_values, dim=self.axis)] = rank[self.axis]
first_max, _ = torch.min(tensor, dim=self.axis)
one_hot = torch.nn.functional.one_hot(first_max, rank[self.axis])
return one_hot
class compress(torch.nn.Module):
def __init__(self, axis=None):
self.axis = axis
super().__init__()
def forward(self, A, B):
idx = (B.to(torch.bool) != 0).nonzero().reshape(-1)
        if self.axis is not None:
return torch.index_select(A, self.axis, idx)
else:
return torch.index_select(A.reshape(-1), 0, idx)
# TODO: Many more to be implemented
__all__ = [
"argmax",
"argmin",
"matmul",
"add",
"sin",
"cos",
"abs",
"flatten",
"clip",
"shape",
"det",
"And",
"Or",
"Xor",
"concat",
"ceil",
"floor",
"bitshift",
"conv",
"elu",
"hardsigmoid",
"hardswish",
"compress",
]
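# Usage sketch (illustrative only, not part of the module): each wrapper above is a
# plain torch.nn.Module, so it can be instantiated and called directly. The tensor
# values below are made up for illustration.
#
#     x = torch.tensor([[1.0, 3.0, 2.0]])
#     argmax()(x)                             # tensor(1)
#     add()(x, torch.ones_like(x))            # tensor([[2., 4., 3.]])
#     flatten()(torch.randn(2, 3, 4)).shape   # torch.Size([1, 24])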
``` |
{
"source": "29rj/Fusion",
"score": 2
} |
#### File: applications/academic_information/views.py
```python
import datetime
import json
import os
import xlrd
import logging
from io import BytesIO
from xlsxwriter.workbook import Workbook
from xhtml2pdf import pisa
from itertools import chain
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.template.loader import get_template
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from applications.academic_procedures.models import MinimumCredits, Register, InitialRegistration, course_registration, AssistantshipClaim,Assistantship_status
from applications.globals.models import (Designation, ExtraInfo,
HoldsDesignation, DepartmentInfo)
from .forms import AcademicTimetableForm, ExamTimetableForm, MinuteForm
from .models import (Calendar, Course, Exam_timetable, Grades, Curriculum_Instructor,Constants,
Meeting, Student, Student_attendance, Timetable,Curriculum)
from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester, Programme, Discipline)
from applications.academic_procedures.views import acad_proced_global_context
from applications.programme_curriculum.models import Batch
@login_required
def user_check(request):
"""
This function is used to check the type of user.
    It checks the authentication of the user.
@param:
request - contains metadata about the requested page
@variables:
current_user - get user from request
user_details - extract details of user from database
desig_id - check for designation
acadadmin - designation for Acadadmin
final_user - final designation of request user
"""
try:
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
except Exception as e:
acadadmin=""
final_user=""
pass
if (str(acadadmin) != str(final_user)):
return True
else:
return False
def get_context(request):
"""
    This function gets basic data from the database to send to the template
@param:
request - contains metadata about the requested page
@variables:
    acadTtForm - the form to add academic calendar
    examTtForm - the form required to add exam timetable
    exam_t - all the exam timetable objects
    timetable - all the academic timetable objects
    calendar - all the academic calendar objects
    context - the data to be displayed on the webpage
    this_sem_course - the data of this semester's courses
next_sem_courses - the data of next semester courses
courses - all the courses in curriculum
course_type - list the type of courses
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
course_list = sem_for_generate_sheet()
if(course_list[0]==1):
course_list_2 = [2, 4, 6, 8]
else:
course_list_2 = [1, 3, 5, 7]
# examTtForm = ExamTimetableForm()
# acadTtForm = AcademicTimetableForm()
# calendar = Calendar.objects.all()
# this_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True)
# next_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True)
# courses = Course.objects.all()
# course_type = Constants.COURSE_TYPE
# timetable = Timetable.objects.all()
# exam_t = Exam_timetable.objects.all()
procedures_context = acad_proced_global_context()
try:
examTtForm = ExamTimetableForm()
acadTtForm = AcademicTimetableForm()
calendar = Calendar.objects.all()
this_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list).filter(floated=True)
next_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list_2).filter(floated=True)
courses = Course.objects.all()
courses_list = Courses.objects.all()
course_type = Constants.COURSE_TYPE
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
pgstudent = Student.objects.filter(programme = "M.Tech") | Student.objects.filter(programme = "PhD")
assistant_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(acad_approval = False)
        assistant_approve_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval = True)
assistant_list_length = len(assistant_list.filter(acad_approval = False))
assis_stat = Assistantship_status.objects.all()
for obj in assis_stat:
assistant_flag = obj.student_status
hod_flag = obj.hod_status
account_flag = obj.account_status
except Exception as e:
examTtForm = ""
acadTtForm = ""
calendar = ""
this_sem_courses = ""
next_sem_courses = ""
courses = ""
course_type = ""
timetable = ""
exam_t = ""
pass
context = {
'acadTtForm': acadTtForm,
'examTtForm': examTtForm,
'courses': courses,
'courses_list': courses_list,
'course_type': course_type,
'exam': exam_t,
'timetable': timetable,
'academic_calendar': calendar,
'next_sem_course': next_sem_courses,
'this_sem_course': this_sem_courses,
'curriculum': curriculum,
'pgstudent' : pgstudent,
'assistant_list' : assistant_list,
'assistant_approve_list' : assistant_approve_list,
'assistant_list_length' : assistant_list_length,
'tab_id': ['1','1'],
'context': procedures_context['context'],
'lists': procedures_context['lists'],
'date': procedures_context['date'],
'query_option1': procedures_context['query_option1'],
'query_option2': procedures_context['query_option2'],
'course_verification_date' : procedures_context['course_verification_date'],
'submitted_course_list' : procedures_context['submitted_course_list'],
'result_year' : procedures_context['result_year'],
'batch_grade_data' : procedures_context['batch_grade_data'],
'batch_branch_data' : procedures_context['batch_branch_data'],
'assistant_flag' : assistant_flag,
'hod_flag' : hod_flag,
'account_flag' : account_flag
}
return context
@login_required
def homepage(request):
"""
This function is used to set up the homepage of the application.
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
senates - the extraInfo objects that holds the designation as a senator
students - all the objects in the Student class
Convenor - the extraInfo objects that holds the designation as a convenor
CoConvenor - the extraInfo objects that holds the designation as a coconvenor
meetings - the all meeting objects held in senator meetings
minuteForm - the form to add a senate meeting minutes
    acadTtForm - the form to add academic calendar
examTtForm - the form required to add exam timetable
Dean - the extraInfo objects that holds the designation as a dean
student - the students as a senator
extra - all the extraInfor objects
exam_t - all the exam timetable objects
timetable - all the academic timetable objects
    calendar - all the academic calendar objects
department - all the departments in the college
attendance - all the attendance objects of the students
    context - the data to be displayed on the webpage
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context = get_context(request)
return render(request, "ais/ais.html", context)
# ####################################
# # curriculum #
# ####################################
@login_required
def curriculum(request):
"""
This function is used to see curriculum and edit entries in a curriculum.
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
curriculum - Get data about curriculum from database
courses - get courses from database
courses_type - get course types from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context = get_context(request)
context['tab_id'][0]='6'
if request.method == 'POST':
try:
request_batch = request.POST['batch']
request_branch = request.POST['branch']
request_programme = request.POST['programme']
request_sem = request.POST['sem']
except Exception as e:
request_batch = ""
request_branch = ""
request_programme = ""
request_sem = ""
#for checking if the user has searched for any particular curriculum
if request_batch == "" and request_branch == "" and request_programme=="" and request_sem=="":
curriculum = None #Curriculum.objects.all()
else:
if int(request_sem) == 0:
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).order_by('sem')
else:
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem= request_sem)
# context={
# 'courses' : courses,
# 'course_type' : course_type,
# 'curriculum' : curriculum,
# 'tab_id' :['3','1']
# }
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
html = render_to_string('ais/curr_list.html',{'curriculum':curriculum,'courses':courses,'course_type':course_type},request)
obj = json.dumps({'html':html})
#return render(request, "ais/ais.html", context)
return HttpResponse(obj,content_type='application/json')
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def add_curriculum(request):
"""
This function is used to add new curriculum in database
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
batch - batch from form.REQUEST
branch - branch from form.REQUEST
sem - semester from form.REQUEST
course_code - course_code from form.REQUEST
course_name - course-name from form.REQUEST
course_id - course_id from database
credits - credits from form.REQUEST
optional - optional from form.REQUEST
course_type - course_type from form.REQUEST
ins - data is stored in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','2']
}
if request.method == 'POST':
i=0
new_curr=[]
while True:
if "semester_"+str(i) in request.POST:
try:
programme=request.POST['AddProgramme']
batch=request.POST['AddBatch']
branch=request.POST['AddBranch']
sem=request.POST["semester_"+str(i)]
course_code=request.POST["course_code_"+str(i)]
course_name=request.POST["course_name_"+str(i)]
course_id=Course.objects.get(course_name=course_name)
credits=request.POST["credits_"+str(i)]
if "optional_"+str(i) in request.POST:
optional=True
else:
optional=False
course_type=request.POST["course_type_"+str(i)]
except Exception as e:
programme=""
batch=""
branch=""
sem=""
course_code=""
course_name=""
course_id=""
credits=""
optional=""
course_type=""
pass
ins=Curriculum(
programme=programme,
batch=batch,
branch=branch,
sem=sem,
course_code=course_code,
course_id=course_id,
credits=credits,
optional=optional,
course_type=course_type,
)
new_curr.append(ins)
else:
break
i+=1
Curriculum.objects.bulk_create(new_curr)
curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme)
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','2']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def edit_curriculum(request):
"""
This function is used to edit curriculum in database
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
batch - batch from form.REQUEST
branch - branch from form.REQUEST
sem - semester from form.REQUEST
course_code - course_code from form.REQUEST
course_name - course-name from form.REQUEST
course_id - course_id from database
credits - credits from form.REQUEST
optional - optional from form.REQUEST
course_type - course_type from form.REQUEST
ins - data is stored in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','1']
}
if request.method == 'POST':
try:
id=request.POST['id']
programme=request.POST['programme']
batch=request.POST['batch']
branch=request.POST['branch']
sem=request.POST["sem"]
course_code=request.POST["course_code"]
course_name=request.POST["course_id"]
course_id=Course.objects.get(course_name=course_name)
credits=request.POST["credits"]
if request.POST['optional'] == "on":
optional=True
else:
optional=False
course_type=request.POST["course_type"]
except Exception as e:
id=""
programme=""
batch=""
branch=""
sem=""
course_code=""
course_name=""
course_id=""
credits=""
optional=""
course_type=""
pass
entry=Curriculum.objects.all().select_related().filter(curriculum_id=id).first()
entry.programme=programme
entry.batch=batch
entry.branch=branch
entry.sem=sem
entry.course_code=course_code
entry.course_id=course_id
entry.credits=credits
entry.optional=optional
entry.course_type=course_type
entry.save()
curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme)
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def delete_curriculum(request):
"""
This function is used to delete curriculum entry in database
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
dele - data being deleted from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','1']
}
if request.method == "POST":
dele = Curriculum.objects.select_related().filter(curriculum_id=request.POST['id'])
dele.delete()
curriculum = Curriculum.objects.select_related().filter(branch = request.POST['branch']).filter(batch = request.POST['batch']).filter(programme= request.POST['programme'])
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
return render(request, 'ais/ais.html', context)
@login_required
def next_curriculum(request):
"""
This function is used to decide curriculum for new batch.
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
now - current date from system
year - current year
    batch - batch from form
    curriculum - curriculum details from database
    ins - insert data in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == 'POST':
programme = request.POST['programme']
now = datetime.datetime.now()
year = int(now.year)
batch = year-1
curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme)
if request.POST['option'] == '1':
new_curriculum=[]
for i in curriculum:
ins=Curriculum(
programme=i.programme,
batch=i.batch+1,
branch=i.branch,
sem=i.sem,
course_code=i.course_code,
course_id=i.course_id,
credits=i.credits,
optional=i.optional,
course_type=i.course_type,
)
new_curriculum.append(ins)
Curriculum.objects.bulk_create(new_curriculum)
elif request.POST['option'] == '2':
new_curriculum=[]
for i in curriculum:
ins=Curriculum(
programme=i.programme,
batch=i.batch+1,
branch=i.branch,
sem=i.sem,
course_code=i.course_code,
course_id=i.course_id,
credits=i.credits,
optional=i.optional,
course_type=i.course_type,
)
new_curriculum.append(ins)
Curriculum.objects.bulk_create(new_curriculum)
batch=batch+1
curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme)
context= {
'curriculumm' :curriculum,
'tab_id' :['3','3']
}
return render(request, "ais/ais.html", context)
else:
context= {
'tab_id' :['3','2']
}
return render(request, "ais/ais.html", context)
context= {
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
@login_required
def add_timetable(request):
"""
    acad-admin can upload the time table (of any type) for the semester.
@param:
request - contains metadata about the requested page.
@variables:
acadTtForm - data of delete dictionary in post request
timetable - all timetable from database
exam_t - all exam timetable from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
context= {
'exam': exam_t,
'timetable': timetable,
'tab_id' :['10','1']
}
acadTtForm = AcademicTimetableForm()
if request.method == 'POST' and request.FILES:
acadTtForm = AcademicTimetableForm(request.POST, request.FILES)
if acadTtForm.is_valid():
acadTtForm.save()
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def add_exam_timetable(request):
"""
    acad-admin can upload the exam timetable of the ongoing semester.
@param:
request - contains metadata about the requested page.
@variables:
examTtForm - data of delete dictionary in post request
timetable - all timetable from database
exam_t - all exam timetable from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
context= {
'exam': exam_t,
'timetable': timetable,
'tab_id' :['10','2']
}
examTtForm = ExamTimetableForm()
if request.method == 'POST' and request.FILES:
examTtForm = ExamTimetableForm(request.POST, request.FILES)
if examTtForm.is_valid():
examTtForm.save()
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def delete_timetable(request):
"""
acad-admin can delete the outdated timetable from the server.
@param:
request - contains metadata about the requested page.
@variables:
data - data of delete dictionary in post request
t - Object of time table to be deleted
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
data = request.POST['delete']
t = Timetable.objects.get(time_table=data)
t.delete()
return HttpResponse("TimeTable Deleted")
@login_required
def delete_exam_timetable(request):
"""
acad-admin can delete the outdated exam timetable.
@param:
request - contains metadata about the requested page.
@variables:
data - data of delete dictionary in post request
t - Object of Exam time table to be deleted
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
data = request.POST['delete']
t = Exam_timetable.objects.get(exam_time_table=data)
t.delete()
return HttpResponse("TimeTable Deleted")
@login_required
def add_calendar(request):
"""
to add an entry to the academic calendar to be uploaded
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
    to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
c = object to save new event to the academic calendar.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
except Exception as e:
from_date=""
to_date=""
desc=""
pass
c = Calendar(
from_date=from_date,
to_date=to_date,
description=desc)
c.save()
HttpResponse("Calendar Added")
return render(request, "ais/ais.html", context)
@login_required
def update_calendar(request):
"""
to update an entry to the academic calendar to be updated.
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
    to_date - The ending date for the academic calendar event.
    desc - Description for the academic calendar event.
    prev_desc - Description of the previous event which is to be updated.
    get_calendar_details - the calendar instance fetched from the database for the previous description.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
prev_desc = request.POST.getlist('prev_desc')[0]
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first()
get_calendar_details.description = desc
get_calendar_details.from_date = from_date
get_calendar_details.to_date = to_date
get_calendar_details.save()
except Exception as e:
from_date=""
to_date=""
desc=""
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
#Generate Attendance Sheet
def sem_for_generate_sheet():
"""
    This function returns the list of semester numbers that run in the current half of the academic year
@variables:
now - current datetime
month - current month
"""
now = datetime.datetime.now()
month = int(now.month)
if month >= 7 and month <= 12:
return [1, 3, 5, 7]
else:
return [2, 4, 6, 8]
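# Illustrative example: a call made in October (month 10) falls in the first half of
# the academic year and returns [1, 3, 5, 7]; a call made in March returns [2, 4, 6, 8].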
@login_required
def generatexlsheet(request):
"""
to generate Course List of Registered Students
@param:
request - contains metadata about the requested page
@variables:
batch - gets the batch
course - gets the course
curr_key - gets the curriculum from database
    obj - get students' registration data from database
ans - Formatted Array to be converted to xlsx
k -temporary array to add data to formatted array/variable
output - io Bytes object to write to xlsx file
book - workbook of xlsx file
title - formatting variable of title the workbook
subtitle - formatting variable of subtitle the workbook
normaltext - formatting variable for normal text
sheet - xlsx sheet to be rendered
titletext - formatting variable of title text
dep - temporary variables
z - temporary variables for final output
b - temporary variables for final output
c - temporary variables for final output
st - temporary variables for final output
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
try:
batch = request.POST['batch']
course = Courses.objects.get(id = request.POST['course'])
obj = course_registration.objects.all().filter(course_id = course)
except Exception as e:
batch=""
course=""
curr_key=""
obj=""
registered_courses = []
for i in obj:
if i.student_id.batch_id.year == int(batch):
registered_courses.append(i)
ans = []
for i in registered_courses:
k = []
k.append(i.student_id.id.id)
k.append(i.student_id.id.user.first_name)
k.append(i.student_id.id.user.last_name)
k.append(i.student_id.id.department)
ans.append(k)
ans.sort()
output = BytesIO()
book = Workbook(output,{'in_memory':True})
title = book.add_format({'bold': True,
'font_size': 22,
'align': 'center',
'valign': 'vcenter'})
subtitle = book.add_format({'bold': True,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
normaltext = book.add_format({'bold': False,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
sheet = book.add_worksheet()
    title_text = str(course.name) + " : " + str(batch)
sheet.set_default_row(25)
sheet.merge_range('A2:E2', title_text, title)
sheet.write_string('A3',"Sl. No",subtitle)
sheet.write_string('B3',"Roll No",subtitle)
sheet.write_string('C3',"Name",subtitle)
sheet.write_string('D3',"Discipline",subtitle)
sheet.write_string('E3','Signature',subtitle)
sheet.set_column('A:A',20)
sheet.set_column('B:B',20)
sheet.set_column('C:C',60)
sheet.set_column('D:D',15)
sheet.set_column('E:E',30)
k = 4
num = 1
for i in ans:
sheet.write_number('A'+str(k),num,normaltext)
num+=1
z,b,c = str(i[0]),i[1],i[2]
name = str(b)+" "+str(c)
temp = str(i[3]).split()
dep = str(temp[len(temp)-1])
sheet.write_string('B'+str(k),z,normaltext)
sheet.write_string('C'+str(k),name,normaltext)
sheet.write_string('D'+str(k),dep,normaltext)
k+=1
book.close()
output.seek(0)
response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel')
st = 'attachment; filename = ' + course.code + '.xlsx'
response['Content-Disposition'] = st
return response
@login_required
def generate_preregistration_report(request):
"""
    to generate the pre-registration report after pre-registration
@param:
request - contains metadata about the requested page
@variables:
sem - get current semester from current time
now - get current time
    year - get current year
batch - gets the batch from form
sem - stores the next semester
obj - All the registration details appended into one
data - Formated data for context
m - counter for Sl. No (in formated data)
z - temporary array to add data to variable data
k -temporary array to add data to formatted array/variable
output - io Bytes object to write to xlsx file
book - workbook of xlsx file
title - formatting variable of title the workbook
subtitle - formatting variable of subtitle the workbook
normaltext - formatting variable for normal text
sheet - xlsx sheet to be rendered
titletext - formatting variable of title text
dep - temporary variables
z - temporary variables for final output
b - temporary variables for final output
c - temporary variables for final output
st - temporary variables for final output
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
sem = request.POST.get('semester_no')
batch_id=request.POST.get('batch_branch')
batch = Batch.objects.filter(id = batch_id).first()
obj = InitialRegistration.objects.filter(student_id__batch_id=batch_id, semester_id__semester_no=sem)
registered_students = set()
unregistered_students = set()
for stu in obj:
registered_students.add(stu.student_id)
students = Student.objects.filter(batch_id = batch_id)
for stu in students:
if stu not in registered_students:
unregistered_students.add(stu)
data = []
m = 1
for i in unregistered_students:
z = []
z.append(m)
m += 1
z.append(i.id.user.username)
z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name))
z.append(i.id.department.name)
z.append('not registered')
data.append(z)
for i in registered_students:
z = []
z.append(m)
m += 1
z.append(i.id.user.username)
z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name))
z.append(i.id.department.name)
z.append('registered')
data.append(z)
output = BytesIO()
book = Workbook(output,{'in_memory':True})
title = book.add_format({'bold': True,
'font_size': 22,
'align': 'center',
'valign': 'vcenter'})
subtitle = book.add_format({'bold': True,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
normaltext = book.add_format({'bold': False,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
sheet = book.add_worksheet()
title_text = ("Pre-registeration : "+ batch.name + str(" ") + batch.discipline.acronym + str(" ") + str(batch.year))
sheet.set_default_row(25)
sheet.merge_range('A2:E2', title_text, title)
sheet.write_string('A3',"Sl. No",subtitle)
sheet.write_string('B3',"Roll No",subtitle)
sheet.write_string('C3',"Name",subtitle)
sheet.write_string('D3',"Discipline",subtitle)
sheet.write_string('E3','Status',subtitle)
sheet.set_column('A:A',20)
sheet.set_column('B:B',20)
sheet.set_column('C:C',50)
sheet.set_column('D:D',15)
sheet.set_column('E:E',15)
k = 4
num = 1
for i in data:
sheet.write_number('A'+str(k),num,normaltext)
num+=1
            a, b, c, d, e = str(i[0]), str(i[1]), str(i[2]), str(i[3]), str(i[4])
sheet.write_string('B'+str(k),b,normaltext)
sheet.write_string('C'+str(k),c,normaltext)
sheet.write_string('D'+str(k),d,normaltext)
sheet.write_string('E'+str(k),e,normaltext)
k+=1
book.close()
output.seek(0)
response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel')
        st = 'attachment; filename = ' + batch.name + batch.discipline.acronym + str(batch.year) + '-preregistration.xlsx'
response['Content-Disposition'] = st
return response
@login_required
def add_new_profile (request):
"""
    To add details of new upcoming students in the database. User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
profiles - gets the excel file having data
excel - excel file
sheet - sheet no in excel file
roll_no - details of student from file
first_name - details of student from file
last_name - details of student from file
email - details of student from file
sex - details of student from file
title - details of student from file
dob - details of student from file
fathers_name - details of student from file
mothers_name - details of student from file
category - details of student from file
phone_no - details of student from file
address - details of student from file
department - details of student from file
specialization - details of student from file
hall_no - details of student from file
programme - details of student from file
batch - details of student from file
user - new user created in database
einfo - new extrainfo object created in database
stud_data - new student object created in database
desig - get designation object of student
holds_desig - get hold_desig object of student
currs - get curriculum details
reg - create registeration object in registeration table
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['2','1']
}
if request.method == 'POST' and request.FILES:
profiles=request.FILES['profiles']
excel = xlrd.open_workbook(file_contents=profiles.read())
sheet=excel.sheet_by_index(0)
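        # Assumed spreadsheet layout (one student per row), matching the cell reads below:
        #   col 0: roll_no, 1: first_name, 2: last_name, 3: email, 4: sex,
        #   5: date_of_birth, 6: father's name, 7: mother's name, 8: category,
        #   9: phone_no, 10: address, 11: department, 12: specialization, 13: hall_no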
for i in range(sheet.nrows):
roll_no=int(sheet.cell(i,0).value)
first_name=str(sheet.cell(i,1).value)
last_name=str(sheet.cell(i,2).value)
email=str(sheet.cell(i,3).value)
sex=str(sheet.cell(i,4).value)
if sex == 'F':
title='Ms.'
else:
title='Mr.'
dob_tmp=sheet.cell(i,5).value
dob_tmp=sheet.cell_value(rowx=i,colx=5)
dob=datetime.datetime(*xlrd.xldate_as_tuple(dob_tmp,excel.datemode))
fathers_name=str(sheet.cell(i,6).value)
mothers_name=str(sheet.cell(i,7).value)
category=str(sheet.cell(i,8).value)
phone_no=int(sheet.cell(i,9).value)
address=str(sheet.cell(i,10).value)
dept=str(sheet.cell(i,11).value)
specialization=str(sheet.cell(i,12).value)
hall_no=sheet.cell(i,13 ).value
department=DepartmentInfo.objects.all().filter(name=dept).first()
if specialization == "":
specialization="None"
if hall_no == None:
hall_no=3
else:
hall_no=int(hall_no)
programme_name=request.POST['Programme']
batch_year=request.POST['Batch']
batch = Batch.objects.all().filter(name = programme_name, discipline__acronym = dept, year = batch_year).first()
user = User.objects.create_user(
username=roll_no,
password='<PASSWORD>',
first_name=first_name,
last_name=last_name,
email=email,
)
einfo = ExtraInfo.objects.create(
id=roll_no,
user=user,
title=title,
sex=sex,
date_of_birth=dob,
address=address,
phone_no=phone_no,
user_type='student',
department=department,
)
sem=1
stud_data = Student.objects.create(
id=einfo,
programme = programme_name,
batch=batch_year,
batch_id = batch,
father_name = fathers_name,
mother_name = mothers_name,
cpi = 0,
category = category,
hall_no = hall_no,
specialization = specialization,
curr_semester_no=sem,
)
desig = Designation.objects.get(name='student')
hold_des = HoldsDesignation.objects.create(
user=user,
working=user,
designation=desig,
)
sem_id = Semester.objects.get(curriculum = batch.curriculum, semester_no = sem)
course_slots = CourseSlot.objects.all().filter(semester = sem_id)
courses = []
for course_slot in course_slots:
courses += course_slot.courses.all()
new_reg=[]
for c in courses:
reg=course_registration(
course_id = c,
semester_id=sem_id,
student_id=stud_data
)
new_reg.append(reg)
course_registration.objects.bulk_create(new_reg)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
def get_faculty_list():
"""
to get faculty list from database
@param:
request - contains metadata about the requested page.
@variables:
f1,f2,f3 - temporary varibles
faculty - details of faculty of data
faculty_list - list of faculty
"""
try:
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
except Exception as e:
f1=f2=f3=""
pass
faculty = list(chain(f1,f2,f3))
faculty_list = []
for i in faculty:
faculty_list.append(i)
return faculty_list
@login_required
def float_course(request):
"""
    to float courses for the next sem and store data in database.
User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['5','1']
}
if request.method == 'POST':
try:
request_batch = request.POST['batch']
request_branch = request.POST['branch']
request_programme = request.POST['programme']
except Exception as e:
request_batch = ""
request_branch = ""
request_programme = ""
if request_batch == "" and request_branch == "" and request_programme=="":
curriculum = None #Curriculum.objects.all()
else:
sem = sem_for_generate_sheet()
now = datetime.datetime.now()
year = int(now.year)
if sem[0] == 2:
sem = sem[year-int(request_batch)-1]
else:
sem = sem[year-int(request_batch)]
sem+=1
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem=sem).order_by('course_code')
faculty_list = get_faculty_list()
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'faculty_list': faculty_list,
'tab_id' :['5','1']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def float_course_submit(request):
"""
    to float courses for the next sem and store data in database.
User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['5','1']
}
if request.method == "POST":
i=1
while True:
if str(i)+"_ccode" in request.POST:
if str(i)+"_fac" in request.POST:
if request.POST[str(i)+"_fac"] == "" :
logging.warning("No faculty")
else:
flot = Curriculum.objects.select_related().get(curriculum_id=request.POST[str(i)+"_ccode"])
flot.floated = True
flot.save()
new_curr_inst=[]
                        # use a distinct loop variable so the outer counter `i` is not clobbered
                        for c, fac_username in enumerate(request.POST.getlist(str(i)+'_fac')):
                            inst = get_object_or_404(User, username = fac_username)
                            inst = ExtraInfo.objects.select_related('user','department').get(user=inst)
if c==0:
ins=Curriculum_Instructor(
curriculum_id=flot,
instructor_id=inst,
chief_inst=True,
)
new_curr_inst.append(ins)
else:
ins=Curriculum_Instructor(
curriculum_id=flot,
instructor_id=inst,
chief_inst=False,
)
new_curr_inst.append(ins)
Curriculum_Instructor.objects.bulk_create(new_curr_inst)
else:
break
i+=1
return render(request, "ais/ais.html", context)
# # ---------------------senator------------------
# @csrf_exempt
def senator(request):
# """
# to add a new student senator
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - gets the data of current user.
# user_details - gets the details of the required user.
# desig_id - used to check the designation ID.
# extraInfo - extraInfo object of the student with that rollno
# s - designation object of senator
# hDes - holdsDesignation object to store that the particualr student is holding the senator designation
# student - the student object of the new senator
# data - data of the student to be displayed in teh webpage
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
    pass  # stub: the original acad-admin check is commented out above and desig_id is undefined here
#print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == 'POST':
# print(request.POST, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# rollno = request.POST.getlist('Roll Number')[0]
# # print(request.POST.get('rollno'))
# extraInfo = ExtraInfo.objects.get(id=rollno)
# s = Designation.objects.get(name='Senator')
# hDes = HoldsDesignation()
# hDes.user = extraInfo.user
# hDes.working = extraInfo.user
# hDes.designation = s
# hDes.save()
# student = Student.objects.get(id=extraInfo)
# data = {
# 'name': extraInfo.user.username,
# 'rollno': extraInfo.id,
# 'programme': student.programme,
# 'branch': extraInfo.department.name
# }
# return HttpResponseRedirect('/aims/')
# # return JsonResponse(data)
# else:
# return HttpResponseRedirect('/aims/')
# @csrf_exempt
def deleteSenator(request, pk):
# """
# to remove a senator from the position
# @param:
# request - contains metadata about the requested page
# @variables:
# s - the designation object that contains senator
# student - the list students that is a senator
# hDes - the holdDesignation object that stores the
# information that the particular student is a senator
# """
pass
# if request.POST:
# s = get_object_or_404(Designation, name="Senator")
# student = get_object_or_404(ExtraInfo, id=request.POST.getlist("senate_id")[0])
# hDes = get_object_or_404( HoldsDesignation, user = student.user)
# hDes.delete()
# return HttpResponseRedirect('/aims/')
# else:
# return HttpResponseRedirect('/aims/')# ####################################################
# # ##########covenors and coconvenors##################
# @csrf_exempt
def add_convenor(request):
# """
# to add a new student convenor/coconvenor
# @param:
# request - contains metadata about the requested page
# @variables:
# rollno - rollno of the student to become the convenor/coconvenor
# extraInfo - extraInfo object of the student with that rollno
# s - designation object of Convenor
# p - designation object of Co Convenor
# result - the data that contains where the student will become
# convenor or coconvenor
# hDes - holdsDesignation object to store that the particualr student is
# holding the convenor/coconvenor designation
# student - the student object of the new convenor/coconvenor
# data - data of the student to be displayed in the webpage
# """
s = Designation.objects.get(name='Convenor')
# p = Designation.objects.get(name='Co Convenor')
# if request.method == 'POST':
# rollno = request.POST.get('rollno_convenor')
# extraInfo = ExtraInfo.objects.get(id=rollno)
# s = Designation.objects.get(name='Convenor')
# p = Designation.objects.get(name='Co Convenor')
# result = request.POST.get('designation')
# hDes = HoldsDesignation()
# hDes.user = extraInfo.user
# hDes.working = extraInfo.user
# if result == "Convenor":
# hDes.designation = s
# else:
# hDes.designation = p
# hDes.save()
# data = {
# 'name': extraInfo.user.username,
# 'rollno_convenor': extraInfo.id,
# 'designation': hDes.designation.name,
# }
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# @csrf_exempt
def deleteConvenor(request, pk):
# """
# to remove a convenor/coconvenor from the position
# @param:
# request - contains metadata about the requested page
# pk - the primary key of that particular student field
# @variables:
# s - the designation object that contains convenor
# c - the designation object that contains co convenor
# student - the student object with the given pk
# hDes - the holdDesignation object that stores the
# information that the particular student is a convenor/coconvenor to be deleted
# data - data of the student to be hidden in the webpage
# """
# s = get_object_or_404(Designation, name="Convenor")
c = get_object_or_404(Designation, name="Co Convenor")
# student = get_object_or_404(ExtraInfo, id=pk)
# hDes = HoldsDesignation.objects.filter(user = student.user)
# designation = []
# for des in hDes:
# if des.designation == s or des.designation == c:
# designation = des.designation.name
# des.delete()
# data = {
# 'id': pk,
# 'designation': designation,
# }
# return JsonResponse(data)# ######################################################
# # ##########Senate meeting Minute##################
# @csrf_exempt
def addMinute(request):
# """
# to add a new senate meeting minute object to the database.
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - details of the current user.
# desig_id - to check the designation of the user.
# user_details - to get the details of the required user.
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
    pass  # stub: the original acad-admin check is commented out above and desig_id is undefined here
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == 'POST' and request.FILES:
# form = MinuteForm(request.POST, request.FILES)
# if form.is_valid():
# form.save()
# return HttpResponse('sucess')
# else:
# return HttpResponse('not uploaded')
# return render(request, "ais/ais.html", {})
def deleteMinute(request):
# """
# to delete an existing senate meeting minute object from the database.
# @param:
# request - contains metadata about the requested page
# @variables:
# data - the id of the minute object to be deleted
# t - the minute object received from id to be deleted
# """
# if request.method == "POST":
# data = request.POST['delete']
# t = Meeting.objects.get(id=data)
# t.delete()
return HttpResponseRedirect('/aims/')
# # ######################################################
# # ##########Student basic profile##################
# @csrf_exempt
def add_basic_profile(request):
# """
# It adds the basic profile information like username,password, name,
# rollno, etc of a student
# @param:
# request - contains metadata about the requested page
# @variables:
# name - the name of the student
# roll - the rollno of the student
# batch - the current batch of the student
# programme - the programme the student is enrolled in
# ph - the phone number of the student
# """
if request.method == "POST":
name = request.POST.get('name')
# roll = ExtraInfo.objects.get(id=request.POST.get('rollno'))
# programme = request.POST.get('programme')
# batch = request.POST.get('batch')
# ph = request.POST.get('phoneno')
# if not Student.objects.filter(id=roll).exists():
# db = Student()
# st = ExtraInfo.objects.get(id=roll.id)
# db.name = name.upper()
# db.id = roll
# db.batch = batch
# db.programme = programme
# st.phone_no = ph
# db.save()
# st.save()
# data = {
# 'name': name,
# 'rollno': roll.id,
# 'programme': programme,
# 'phoneno': ph,
# 'batch': batch
# }
# print(data)
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# @csrf_exempt
def delete_basic_profile(request, pk):
# """
# Deletes the student from the database
# @param:
# request - contains metadata about the requested page
# pk - the primary key of the student's record in the database table
# @variables:
# e - the extraInfo objects of the student
# user - the User object of the student
# s - the student object of the student
# """
e = get_object_or_404(ExtraInfo, id=pk)
# user = get_object_or_404(User, username = e.user.username)
# s = get_object_or_404(Student, id=e)
# data = {
# 'rollno': pk,
# }
# s.delete()
# e.delete()
# u.delete()
# return JsonResponse(data)# #########################################################
# '''
# # view to add attendance data to database
# def curriculum(request):
# '''
def delete_advanced_profile(request):
# """
# to delete the advance information of the student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the username of the logged in user
# user_details - the details of the current user
# desig_id - checking the designation of the current user
# acadadmin - deatils of the acad admin
# s - the student object from the requested rollno
# """
current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# st = request.POST['delete']
# arr = st.split("-")
# stu = arr[0]
# if Student.objects.get(id=stu):
# s = Student.objects.get(id=stu)
# s.father_name = ""
# s.mother_name = ""
# s.hall_no = 1
# s.room_no = ""
# s.save()
# else:
# return HttpResponse("Data Does Not Exist")
# return HttpResponse("Data Deleted Successfully")
def add_advanced_profile(request):
# """
# It adds the advance profile information like hall no, room no,
# profile picture, about me etc of a student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the username of the logged in user
# user_details - the details of the current user
# desig_id - checking the designation of the current user
# acadadmin - deatils of the acad admin
# father - father's name of the student
# rollno - the rollno of the student required to check if the student is available
# mother - mother's name of the student
# add - student's address
# cpi - student's cpi
# hall - hall no of where the student stays
# room no - hostel room no
# """
current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# print(request.POST)
# rollno=request.POST.get('roll')
# print(rollno)
# student = ExtraInfo.objects.get(id=rollno)
# print(student.address)
# if not student:
# data = {}
# return JsonResponse(data)
# else:
# father = request.POST.get('father')
# mother = request.POST.get('mother')
# add = request.POST.get('address')
# hall = request.POST.get('hall')
# room = request.POST.get('room')
# cpi = request.POST.get('cpi')
# student.address = str(hall) + " " + str(room)
# student.save()
# s = Student.objects.get(id=student)
# s.father_name=father
# s.mother_name=mother
# s.hall_no = hall
# s.room_no = room
# s.save()
# return HttpResponseRedirect('/academic-procedures/')
# return HttpResponseRedirect('/academic-procedures/')
def add_optional(request):
# """
# acadmic admin to update the additional courses
# @param:
# request - contains metadata about the requested page.
# @variables:
# choices - selected addtional courses by the academic person.
# course - Course details which is selected by the academic admin.
# """
if request.method == "POST":
pass
# print(request.POST)
# choices = request.POST.getlist('choice')
# for i in choices:
# course = Course.objects.all().filter(course_id=i).first()
# course.acad_selection = True
# course.save()
# courses = Course.objects.all()
# for i in courses:
# if i.course_id not in choices:
# i.acad_selection = False
# i.save()
# return HttpResponseRedirect('/academic-procedures/')
def min_cred(request):
# """
# to set minimum credit for a current semester that a student must take
# @param:
# request - contains metadata about the requested page.
# @variables:
# sem_cred = Get credit details from forms and the append it to an array.
# sem - Get the object for the minimum credits from the database and the update it.
# """
if request.method=="POST":
sem_cred = []
# sem_cred.append(0)
# for i in range(1, 10):
# sem = "sem_"+"1"
# sem_cred.append(request.POST.getlist(sem)[0])
# for i in range(1, 9):
# sem = MinimumCredits.objects.all().filter(semester=i).first()
# sem.credits = sem_cred[i+1]
# sem.save()
# return HttpResponse("Worked")
def view_course(request):
# if request.method == "POST":
# programme=request.POST['programme']
# batch=request.POST['batch']
# branch=request.POST['branch']
# sem=request.POST['sem']
# curriculum_courses = Curriculum.objects.filter(branch = branch).filter(batch = batch).filter(programme= programme).filter(sem = sem)
# print(curriculum_courses)
# courses = Course.objects.all()
# course_type = Constants.COURSE_TYPE
# context= {
# 'courses': courses,
# 'course_type': course_type,
# 'curriculum_course': curriculum_courses,
# }
# return render(request, "ais/ais.html", context)
# else:
# return render(request, "ais/ais.html")
return render(request, "ais/ais.html")
def delete_grade(request):
# """
# It deletes the grade of the student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - father's name of the student
# user_details - the rollno of the student required to check if the student is available
# desig_id - mother 's name of the student
# acadadmin - student's address
# final_user - details of the user
# sem - current semester of the student
# data - tag whether to delete it or not
# course - get the course details
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# print(request.POST['delete'])
# data = request.POST['delete']
# d = data.split("-")
# id = d[0]
# course = d[2]
# sem = int(d[3])
# if request.method == "POST":
# if(Grades.objects.filter(student_id=id, sem=sem)):
# s = Grades.objects.filter(student_id=id, sem=sem)
# for p in s:
# if (str(p.course_id) == course):
# print(p.course_id)
# p.delete()
# else:
# return HttpResponse("Unable to delete data")
return HttpResponse("Data Deleted SuccessFully")
@login_required
def verify_grade(request):
"""
It verifies the grades of the student
@param:
request - contains metadata about the requested page
@variables:
curr_id - curriculum id selected in the request
curr_course - Curriculum entries matching the selected id
grades - Grades recorded against that curriculum, shown for verification
"""
# if user_check(request):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# curr_id=request.POST['course']
# print(curr_id)
# curr_course = Curriculum.objects.filter(curriculum_id=curr_id)
# grades = Grades.objects.filter(curriculum_id=curr_course)
# context= {
# 'grades': grades,
# 'tab_id' :"2"
# }
# return render(request,"ais/ais.html", context)
# else:
# return HttpResponseRedirect('/aims/')
return HttpResponseRedirect('/aims/')
def confirm_grades(request):
# if user_check(request):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# print("confirm hone wala hai")
# print(request.POST)
return HttpResponseRedirect('/aims/')
```
#### File: academic_procedures/api/views.py
```python
import datetime
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404, redirect
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes,authentication_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from applications.academic_information.models import Curriculum
from applications.academic_procedures.models import ThesisTopicProcess
from applications.globals.models import HoldsDesignation, Designation, ExtraInfo
from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester)
from applications.academic_procedures.views import (get_user_semester, get_acad_year,
get_currently_registered_courses,
get_current_credits, get_branch_courses,
Constants, get_faculty_list,
get_registration_courses, get_add_course_options,
get_pre_registration_eligibility,
get_final_registration_eligibility,
get_add_or_drop_course_date_eligibility)
from . import serializers
User = get_user_model()
date_time = datetime.datetime.now()
@api_view(['GET'])
def academic_procedures_faculty(request):
current_user = request.user
user_details = current_user.extrainfo
des = current_user.holds_designations.all().first()
if str(des.designation) == 'student':
return Response({'error':'Not a faculty'}, status=status.HTTP_400_BAD_REQUEST)
elif str(current_user) == 'acadadmin':
return Response({'error':'User is acadadmin'}, status=status.HTTP_400_BAD_REQUEST)
elif str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor":
faculty_object = user_details.faculty
month = int(date_time.month)
sem = []
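# Assumed academic-calendar convention (inferred from the mapping below): odd semesters
# (1,3,5,7) run in the July-December term, even semesters (2,4,6,8) in January-June.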
if month>=7 and month<=12:
sem = [1,3,5,7]
else:
sem = [2,4,6,8]
student_flag = False
fac_flag = True
thesis_supervision_request_list = faculty_object.thesistopicprocess_supervisor.all()
thesis_supervision_request_list_data = serializers.ThesisTopicProcessSerializer(thesis_supervision_request_list, many=True).data
approved_thesis_request_list = serializers.ThesisTopicProcessSerializer(thesis_supervision_request_list.filter(approval_supervisor = True), many=True).data
pending_thesis_request_list = serializers.ThesisTopicProcessSerializer(thesis_supervision_request_list.filter(pending_supervisor = True), many=True).data
courses_list = serializers.CurriculumInstructorSerializer(user_details.curriculum_instructor_set.all(), many=True).data
fac_details = serializers.UserSerializer(current_user).data
resp = {
'student_flag' : student_flag,
'fac_flag' : fac_flag,
'thesis_supervision_request_list' : thesis_supervision_request_list_data,
'pending_thesis_request_list' : pending_thesis_request_list,
'approved_thesis_request_list' : approved_thesis_request_list,
'courses_list': courses_list,
'faculty': fac_details
}
return Response(data=resp, status=status.HTTP_200_OK)
@api_view(['GET'])
def academic_procedures_student(request):
current_user = request.user
current_user_data = {
'first_name': current_user.first_name,
'last_name': current_user.last_name,
'username': current_user.username,
'email': current_user.email
}
user_details = current_user.extrainfo
des = current_user.holds_designations.all().first()
if str(des.designation) == 'student':
obj = user_details.student
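# Set role flags from the programme: ug/masters/phd distinguish the degree level,
# while des_flag marks the design programmes (B.Des / M.Des).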
if obj.programme.upper() == "PH.D":
student_flag = True
ug_flag = False
masters_flag = False
phd_flag = True
fac_flag = False
des_flag = False
elif obj.programme.upper() == "M.DES":
student_flag = True
ug_flag = False
masters_flag = True
phd_flag = False
fac_flag = False
des_flag = True
elif obj.programme.upper() == "B.DES":
student_flag = True
ug_flag = True
masters_flag = False
phd_flag = False
fac_flag = False
des_flag = True
elif obj.programme.upper() == "M.TECH":
student_flag = True
ug_flag = False
masters_flag = True
phd_flag = False
fac_flag = False
des_flag = False
elif obj.programme.upper() == "B.TECH":
student_flag = True
ug_flag = True
masters_flag = False
phd_flag = False
fac_flag = False
des_flag = False
else:
return Response({'message':'Student has no record'}, status=status.HTTP_400_BAD_REQUEST)
current_date = date_time.date()
current_year = date_time.year
batch = obj.batch_id
user_sem = get_user_semester(request.user, ug_flag, masters_flag, phd_flag)
acad_year = get_acad_year(user_sem, current_year)
user_branch = user_details.department.name
cpi = obj.cpi
cur_spi='Sem results not available' # To be fetched from db if result uploaded
details = {
'current_user': current_user_data,
'year': acad_year,
'user_sem': user_sem,
'user_branch' : str(user_branch),
'cpi' : cpi,
'spi' : cur_spi
}
currently_registered_courses = get_currently_registered_courses(user_details.id, user_sem)
currently_registered_courses_data = serializers.CurriculumSerializer(currently_registered_courses, many=True).data
try:
pre_registered_courses = obj.initialregistrations_set.all().filter(semester = user_sem)
pre_registered_courses_show = obj.initialregistrations_set.all().filter(semester = user_sem+1)
except:
pre_registered_courses = None
pre_registered_courses_show = None
try:
final_registered_courses = obj.finalregistrations_set.all().filter(semester = user_sem)
except:
final_registered_courses = None
pre_registered_courses_data = serializers.InitialRegistrationsSerializer(pre_registered_courses, many=True).data
pre_registered_courses_show_data = serializers.InitialRegistrationsSerializer(pre_registered_courses_show, many=True).data
final_registered_courses_data = serializers.FinalRegistrationsSerializer(final_registered_courses, many=True).data
current_credits = get_current_credits(currently_registered_courses)
next_sem_branch_courses = get_branch_courses(current_user, user_sem+1, user_branch)
next_sem_branch_courses_data = serializers.CurriculumSerializer(next_sem_branch_courses, many=True).data
fee_payment_mode_list = dict(Constants.PaymentMode)
next_sem_branch_registration_courses = get_registration_courses(next_sem_branch_courses)
next_sem_branch_registration_courses_data = []
for choices in next_sem_branch_registration_courses:
next_sem_branch_registration_courses_data.append(serializers.CurriculumSerializer(choices, many=True).data)
# next_sem_branch_registration_courses_data = serializers.CurriculumSerializer(next_sem_branch_registration_courses, many=True).data
final_registration_choices = get_registration_courses(get_branch_courses(request.user, user_sem, user_branch))
final_registration_choices_data = []
for choices in final_registration_choices:
final_registration_choices_data.append(serializers.CurriculumSerializer(choices, many=True).data)
performance_list = []
result_announced = False
for i in currently_registered_courses:
try:
performance_obj = obj.semestermarks_set.all().filter(curr_id = i).first()
except:
performance_obj = None
performance_list.append(performance_obj)
performance_list_data = serializers.SemesterMarksSerializer(performance_list, many=True).data
thesis_request_list = serializers.ThesisTopicProcessSerializer(obj.thesistopicprocess_set.all(), many=True).data
pre_existing_thesis_flag = True if obj.thesistopicprocess_set.all() else False
current_sem_branch_courses = get_branch_courses(current_user, user_sem, user_branch)
pre_registration_date_flag = get_pre_registration_eligibility(current_date)
final_registration_date_flag = get_final_registration_eligibility(current_date)
add_or_drop_course_date_flag = get_add_or_drop_course_date_eligibility(current_date)
student_registration_check_pre = obj.studentregistrationcheck_set.all().filter(semester=user_sem+1)
student_registration_check_final = obj.studentregistrationcheck_set.all().filter(semester=user_sem)
pre_registration_flag = False
final_registration_flag = False
if(student_registration_check_pre):
pre_registration_flag = student_registration_check_pre.first().pre_registration_flag
if(student_registration_check_final):
final_registration_flag = student_registration_check_final.first().final_registration_flag
teaching_credit_registration_course = None
if phd_flag:
teaching_credit_registration_course = Curriculum.objects.all().filter(batch = 2016, sem =6)
teaching_credit_registration_course_data = serializers.CurriculumSerializer(teaching_credit_registration_course, many=True).data
if student_flag:
try:
due = obj.dues_set.get()
lib_d = due.library_due
pc_d = due.placement_cell_due
hos_d = due.hostel_due
mess_d = due.mess_due
acad_d = due.academic_due
except:
lib_d, pc_d, hos_d, mess_d, acad_d = 0, 0, 0, 0, 0
tot_d = lib_d + acad_d + pc_d + hos_d + mess_d
registers = obj.register_set.all()
course_list = []
for i in registers:
course_list.append(i.curr_id)
attendence = []
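# Aggregate attendance per registered course: for each instructor of a course, count
# this student's present/absent records and store (course, presents, total) tuples.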
for i in course_list:
instructors = i.curriculum_instructor_set.all()
pr,ab=0,0
for j in list(instructors):
presents = obj.student_attendance_set.all().filter(instructor_id=j, present=True)
absents = obj.student_attendance_set.all().filter(instructor_id=j, present=False)
pr += len(presents)
ab += len(absents)
attendence.append((i,pr,pr+ab))
attendance_data = {}
for course in attendence:
attendance_data[course[0].course_id.course_name] = {
'present' : course[1],
'total' : course[2]
}
branchchange_flag = False
if user_sem == 2:
branchchange_flag=True
# faculty_list = serializers.HoldsDesignationSerializer(get_faculty_list(), many=True).data
resp = {
'details': details,
'currently_registered': currently_registered_courses_data,
'pre_registered_courses' : pre_registered_courses_data,
'pre_registered_courses_show' : pre_registered_courses_show_data,
'final_registered_courses' : final_registered_courses_data,
'current_credits' : current_credits,
'courses_list': next_sem_branch_courses_data,
'fee_payment_mode_list' : fee_payment_mode_list,
'next_sem_branch_registration_courses' : next_sem_branch_registration_courses_data,
'final_registration_choices' : final_registration_choices_data,
'performance_list' : performance_list_data,
'thesis_request_list' : thesis_request_list,
'student_flag' : student_flag,
'ug_flag' : ug_flag,
'masters_flag' : masters_flag,
'phd_flag' : phd_flag,
'fac_flag' : fac_flag,
'des_flag' : des_flag,
'thesis_flag' : pre_existing_thesis_flag,
'drop_courses_options' : currently_registered_courses_data,
'pre_registration_date_flag': pre_registration_date_flag,
'final_registration_date_flag': final_registration_date_flag,
'add_or_drop_course_date_flag': add_or_drop_course_date_flag,
'pre_registration_flag' : pre_registration_flag,
'final_registration_flag': final_registration_flag,
'teaching_credit_registration_course' : teaching_credit_registration_course_data,
'lib_d':lib_d,
'acad_d':acad_d,
'mess_d':mess_d,
'pc_d':pc_d,
'hos_d':hos_d,
'tot_d':tot_d,
'attendance': attendance_data,
'Branch_Change_Flag':branchchange_flag
# 'faculty_list' : faculty_list
}
return Response(data=resp, status=status.HTTP_200_OK)
@api_view(['POST'])
def add_thesis(request):
current_user = request.user
profile = current_user.extrainfo
if profile.user_type == 'student':
if not 'thesis_topic' in request.data:
return Response({'error':'Thesis topic is required'}, status=status.HTTP_400_BAD_REQUEST)
if not 'research_area' in request.data:
return Response({'error':'Research area is required'}, status=status.HTTP_400_BAD_REQUEST)
if 'supervisor_id' in request.data:
try:
supervisor_faculty = User.objects.get(username=request.data['supervisor_id'])
supervisor_faculty = supervisor_faculty.extrainfo
request.data['supervisor_id'] = supervisor_faculty
except:
return Response({'error':'Wrong supervisor id. User does not exist.'}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'error':'supervisor id is required'}, status=status.HTTP_400_BAD_REQUEST)
if 'co_supervisor_id' in request.data:
try:
co_supervisor_faculty = User.objects.get(username=request.data['co_supervisor_id'])
co_supervisor_faculty = co_supervisor_faculty.extrainfo
request.data['co_supervisor_id'] = co_supervisor_faculty
except:
return Response({'error':'Wrong co_supervisor id. User does not exist.'}, status=status.HTTP_400_BAD_REQUEST)
else:
co_supervisor_faculty = None
if 'curr_id' in request.data:
curr_id = None
student = profile.student
request.data['student_id'] = profile
request.data['submission_by_student'] = True
serializer = serializers.ThesisTopicProcessSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'error':'Cannot add thesis'}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['PUT'])
def approve_thesis(request, id):
current_user = request.user
profile = current_user.extrainfo
if profile.user_type == 'faculty':
try:
thesis = ThesisTopicProcess.objects.get(id=id)
except:
return Response({'error':'This thesis does not exist'}, status=status.HTTP_400_BAD_REQUEST)
if 'member1' in request.data:
try:
user1 = User.objects.get(username=request.data['member1'])
member1 = user1.extrainfo
request.data['member1'] = member1
except:
return Response({'error':'Wrong username of member 1. User does not exist.'}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'error':'Member 1 is required'}, status=status.HTTP_400_BAD_REQUEST)
if 'member2' in request.data:
try:
user2 = User.objects.get(username=request.data['member2'])
member2 = user2.extrainfo
request.data['member2'] = member2
except:
return Response({'error':'Wrong username of member 2. User does not exist.'}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'error':'Member 2 is required'}, status=status.HTTP_400_BAD_REQUEST)
if 'member3' in request.data:
try:
user3 = User.objects.get(username=request.data['member3'])
member3 = user3.extrainfo
request.data['member3'] = member3
except:
return Response({'error':'Wrong username of member 3. User does not exist.'}, status=status.HTTP_400_BAD_REQUEST)
else:
member3 = None
if not 'approval' in request.data:
return Response({'error':'Approval value is required.'}, status=status.HTTP_400_BAD_REQUEST)
elif request.data['approval'] != 'yes' and request.data['approval'] != 'no':
return Response({'error':'Wrong approval value provided. Approval value should be yes or no'}, status=status.HTTP_400_BAD_REQUEST)
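# On approval the request is forwarded to the HoD (pending_hod=True); on rejection all
# pending/approval flags are cleared before the partial update below.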
if request.data['approval'] == 'yes':
request.data.pop('approval', None)
request.data['pending_supervisor'] = False
request.data['approval_supervisor'] = True
request.data['forwarded_to_hod'] = True
request.data['pending_hod'] = True
else:
request.data.pop('approval', None)
request.data['pending_supervisor'] = False
request.data['approval_supervisor'] = False
request.data['forwarded_to_hod'] = False
request.data['pending_hod'] = False
serializer = serializers.ThesisTopicProcessSerializer(thesis, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'error':'Cannot approve thesis'}, status=status.HTTP_400_BAD_REQUEST)
```
#### File: applications/counselling_cell/models.py
```python
from django.db import models
from applications.academic_information.models import Student
from django.contrib.auth.models import User
from applications.globals.models import Faculty,ExtraInfo
from datetime import datetime,date
# Create your models here.
class CounsellingCellConstants :
STUDENT_POSTIONS= (
('student_guide', 'Student Guide'),
('student_coordinator', 'Student Coordinator'),
)
FACULTY_POSTIONS= (
('head_counsellor', 'Head Counsellor'),
('faculty_counsellor', 'Faculty Counsellor'),
)
ISSUE_STATUS= (
('status_unresolved', 'Unresolved'),
('status_resolved', 'Resolved'),
('status_inprogress', 'InProgress'),
)
TIME = (
('10', '10 a.m.'),
('11', '11 a.m.'),
('12', '12 p.m.'),
('13', '1 p.m.'),
('14', '2 p.m.'),
('15', '3 p.m.'),
('16', '4 p.m.'),
('17', '5 p.m.'),
('18', '6 p.m.'),
('19', '7 p.m.'),
('20', '8 p.m.'),
('21', '9 p.m.')
)
MEETING_STATUS = (
('status_accepted',"Accepted"),
('status_pending','Pending')
)
class FacultyCounsellingTeam(models.Model):
faculty = models.ForeignKey(Faculty, on_delete=models.CASCADE)
faculty_position = models.CharField(max_length=50,choices=CounsellingCellConstants.FACULTY_POSTIONS)
class Meta:
unique_together = (('faculty', 'faculty_position'))
def __str__(self):
return f"{self.faculty} - {self.faculty_position}"
class StudentCounsellingTeam(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE)
student_position = models.CharField(max_length=50,choices=CounsellingCellConstants.STUDENT_POSTIONS)
class Meta:
unique_together = (('student', 'student_position'))
def __str__(self):
return f"{self.student} - {self.student_position}"
class StudentCounsellingInfo(models.Model):
student_guide = models.ForeignKey(StudentCounsellingTeam,on_delete=models.CASCADE)
student = models.OneToOneField(Student,on_delete=models.CASCADE)
def __str__(self):
return f"{self.student_guide} - {self.student}"
class CounsellingIssueCategory(models.Model):
category_id = models.CharField(max_length=40,unique=True)
category = models.CharField(max_length=40)
def __str__(self):
return f"{self.category}"
class CounsellingIssue(models.Model):
issue_raised_date = models.DateTimeField(default=datetime.now)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
issue_category = models.ForeignKey(CounsellingIssueCategory,on_delete=models.CASCADE)
issue = models.TextField(max_length=500,)
issue_status = models.CharField(max_length=20,choices=CounsellingCellConstants.ISSUE_STATUS,default="status_unresolved")
response_remark = models.TextField(max_length=500,null=True)
resolved_by = models.ForeignKey(ExtraInfo, on_delete=models.CASCADE,null=True)
def __str__(self):
return f"{self.issue} - {student}"
class CounsellingFAQ(models.Model):
counselling_question = models.TextField(max_length=1000)
counselling_answer = models.TextField(max_length=5000)
counselling_category = models.ForeignKey(CounsellingIssueCategory,on_delete=models.CASCADE)
def __str__(self):
return f"{self.counselling_question}"
class CounsellingMeeting(models.Model):
meeting_host= models.ForeignKey(ExtraInfo,on_delete=models.CASCADE,null=True, blank=True)
meeting_date = models.DateField(default=date.today)
meeting_time = models.CharField(max_length=20, choices=CounsellingCellConstants.TIME)
agenda = models.TextField()
venue = models.CharField(max_length=20)
student_invities = models.TextField(max_length=500,default=None)
def __str__(self):
return '{} - {}'.format(self.meeting_time, self.agenda)
class CounsellingMinutes(models.Model):
counselling_meeting = models.ForeignKey(CounsellingMeeting, on_delete=models.CASCADE)
counselling_minutes = models.FileField(upload_to='counselling_cell/')
def __str__(self):
return '{} - {}'.format(self.counselling_meeting, self.counselling_minutes)
class StudentMeetingRequest(models.Model):
requested_time = models.DateTimeField()
student = models.ForeignKey(Student, on_delete=models.CASCADE)
description = models.TextField(max_length=1000)
requested_student_invitee = models.ForeignKey(StudentCounsellingTeam,on_delete=models.CASCADE,null=True, blank=True)
requested_faculty_invitee = models.ForeignKey(FacultyCounsellingTeam,on_delete=models.CASCADE,null=True, blank=True)
requested_meeting_status = models.CharField(max_length=20,choices=CounsellingCellConstants.MEETING_STATUS,default="status_pending")
recipient_reply = models.TextField(max_length=1000)
def __str__(self):
return f"{self.student} - {self.requested_time}"
```
#### File: hostel_management/templatetags/custom_tags.py
```python
from django import template
register = template.Library()
def get_hall_no(value, args):
# print("value ", value)
# print("args ", args, type(args))
args = str(args)
# print("value.args ", value[args])
return value[args]
register.filter('get_hall_no', get_hall_no)
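# Usage sketch (illustrative names, not from the source): after `{% load custom_tags %}`
# the filter can be applied in a template as
#   {{ hall_map|get_hall_no:student_id }}
# which simply indexes the dict-like `hall_map` with the stringified `student_id`.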
```
#### File: placement_cell/api/serializers.py
```python
from rest_framework.authtoken.models import Token
from rest_framework import serializers
from applications.placement_cell.models import (Achievement, Course, Education,
Experience, Has, Patent,
Project, Publication, Skill,
PlacementStatus, NotifyStudent)
class SkillSerializer(serializers.ModelSerializer):
class Meta:
model = Skill
fields = ('__all__')
class HasSerializer(serializers.ModelSerializer):
skill_id = SkillSerializer()
class Meta:
model = Has
fields = ('skill_id','skill_rating')
def create(self, validated_data):
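# Reuse an existing Skill (or create it) via get_or_create, then attach it to the
# Has record; any failure while creating the Has row is reported back as
# "This skill is already present".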
skill = validated_data.pop('skill_id')
skill_id, created = Skill.objects.get_or_create(**skill)
try:
has_obj = Has.objects.create(skill_id=skill_id,**validated_data)
except:
raise serializers.ValidationError({'skill': 'This skill is already present'})
return has_obj
class EducationSerializer(serializers.ModelSerializer):
class Meta:
model = Education
fields = ('__all__')
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('__all__')
class ExperienceSerializer(serializers.ModelSerializer):
class Meta:
model = Experience
fields = ('__all__')
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('__all__')
class AchievementSerializer(serializers.ModelSerializer):
class Meta:
model = Achievement
fields = ('__all__')
class PublicationSerializer(serializers.ModelSerializer):
class Meta:
model = Publication
fields = ('__all__')
class PatentSerializer(serializers.ModelSerializer):
class Meta:
model = Patent
fields = ('__all__')
class NotifyStudentSerializer(serializers.ModelSerializer):
class Meta:
model = NotifyStudent
fields = ('__all__')
class PlacementStatusSerializer(serializers.ModelSerializer):
notify_id = NotifyStudentSerializer()
class Meta:
model = PlacementStatus
fields = ('notify_id', 'invitation', 'placed', 'timestamp', 'no_of_days')
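# Usage sketch (hypothetical queryset): serializing placement statuses embeds the
# related NotifyStudent record under 'notify_id', e.g.
#   PlacementStatusSerializer(PlacementStatus.objects.all(), many=True).data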
``` |
{
"source": "29rou/OpenJij",
"score": 2
} |
#### File: openjij/sampler/csqa_sampler.py
```python
import numpy as np
import openjij
from openjij.sampler import measure_time
from openjij.sampler import SQASampler
from openjij.utils.decorator import deprecated_alias
import cxxjij
class CSQASampler(SQASampler):
"""Sampler with continuous-time simulated quantum annealing (CSQA) using Hamiltonian
.. math::
H(s) = s H_p + \\Gamma (1-s)\\sum_i \\sigma_i^x
where :math:`H_p` is the problem Hamiltonian we want to solve.
Args:
beta (float): Inverse temperature.
gamma (float): Amplitude of quantum fluctuation.
schedule (list): Annealing schedule.
num_sweeps (int): Number of Monte Carlo sweeps.
num_reads (int): Number of iterations (samples).
schedule_info (dict): Information about the annealing schedule.
"""
def __init__(self,
beta=5.0, gamma=1.0,
num_sweeps=1000, schedule=None,
num_reads=1):
self.beta = beta
self.gamma = gamma
self.num_reads = num_reads
self.num_sweeps = num_sweeps
self.schedule = schedule
self.energy_bias = 0.0
self._schedule_setting = {
'beta': beta,
'gamma': gamma,
'num_sweeps': num_sweeps,
'num_reads': num_reads,
}
def _get_result(self, system, model):
info = {}
info['spin_config'] = system.spin_config
state = cxxjij.result.get_solution(system)
return state, info
def sample_ising(self, h, J,
beta=None, gamma=None,
num_sweeps=None, schedule=None,
num_reads=1,
initial_state=None, updater='swendsenwang',
reinitialize_state=True, seed=None):
"""Sampling from the Ising model.
Args:
h (dict): linear biases
J (dict): quadratic biases
beta (float, optional): inverse temperature
gamma (float, optional): strength of transverse field
num_sweeps (int, optional): number of sampling.
schedule (list, optional): schedule list
num_reads (int, optional): number of iterations
initial_state (optional): initial state of spins
updater (str, optional): updater algorithm
reinitialize_state (bool, optional): Re-initialization at each sampling. Defaults to True.
seed (int, optional): Sampling seed.
Returns:
:class:`openjij.sampler.response.Response`: results
Examples:
for Ising case::
>>> h = {0: -1, 1: -1, 2: 1, 3: 1}
>>> J = {(0, 1): -1, (3, 4): -1}
>>> sampler = oj.CSQASampler()
>>> res = sampler.sample_ising(h, J)
for QUBO case::
>>> Q = {(0, 0): -1, (1, 1): -1, (2, 2): 1, (3, 3): 1, (4, 4): 1, (0, 1): -1, (3, 4): 1}
>>> sampler = oj.CSQASampler()
>>> res = sampler.sample_qubo(Q)
"""
bqm = openjij.BinaryQuadraticModel(
linear=h, quadratic=J, vartype='SPIN', sparse=True
)
#Continuous time ising system only supports sparse ising graph
ising_graph = bqm.get_cxxjij_ising_graph()
self._setting_overwrite(
beta=beta, gamma=gamma,
num_sweeps=num_sweeps, num_reads=num_reads
)
self._annealing_schedule_setting(
bqm, beta, gamma, num_sweeps, schedule)
# make init state generator --------------------------------
if initial_state is None:
def init_generator():
spin_config = np.random.choice([1,-1], len(bqm.variables))
return list(spin_config)
else:
def init_generator(): return initial_state
# -------------------------------- make init state generator
# choose updater -------------------------------------------
sqa_system = cxxjij.system.make_continuous_time_ising(
init_generator(), ising_graph, self.gamma
)
_updater_name = updater.lower().replace('_', '').replace(' ', '')
if _updater_name == 'swendsenwang':
algorithm = cxxjij.algorithm.Algorithm_ContinuousTimeSwendsenWang_run
else:
raise ValueError('updater is one of "swendsen wang"')
# ------------------------------------------- choose updater
response = self._cxxjij_sampling(
bqm, init_generator,
algorithm, sqa_system,
reinitialize_state, seed
)
response.info['schedule'] = self.schedule_info
return response
```
#### File: openjij/sampler/sqa_sampler.py
```python
import numpy as np
import openjij
from openjij.sampler import measure_time
from openjij.sampler import BaseSampler
from openjij.utils.decorator import deprecated_alias
import cxxjij
from cimod.utils import get_state_and_energy
import dimod
class SQASampler(BaseSampler):
"""Sampler with Simulated Quantum Annealing (SQA).
Inherits from :class:`openjij.sampler.sampler.BaseSampler`.
Hamiltonian
.. math::
H(s) = s H_p + \\Gamma (1-s)\\sum_i \\sigma_i^x
where :math:`H_p` is the problem Hamiltonian we want to solve.
Args:
beta (float): Inverse temperature.
gamma (float): Amplitude of quantum fluctuation.
trotter (int): Trotter number.
num_sweeps (int): number of sweeps
schedule (list): schedule list
num_reads (int): Number of iterations.
schedule_info (dict): Information about the annealing schedule.
Raises:
ValueError: If the schedule violates either of the constraints below.
- schedule is not a list or numpy.ndarray.
- schedule range is '0 <= s <= 1'.
"""
@property
def parameters(self):
return {
'beta': ['parameters'],
'gamma': ['parameters'],
'trotter': ['parameters'],
}
@deprecated_alias(iteration='num_reads')
def __init__(self,
beta=5.0, gamma=1.0,
num_sweeps=1000, schedule=None,
trotter=4,
num_reads=1):
self.beta = beta
self.gamma = gamma
self.trotter = trotter
self.num_reads = num_reads
self.num_sweeps = num_sweeps
self.schedule = schedule
self._schedule_setting = {
'beta': beta,
'gamma': gamma,
'num_sweeps': num_sweeps,
'num_reads': num_reads,
}
self._make_system = {
'singlespinflip': cxxjij.system.make_transverse_ising
}
self._algorithm = {
'singlespinflip': cxxjij.algorithm.Algorithm_SingleSpinFlip_run
}
def _convert_validation_schedule(self, schedule, beta):
if not isinstance(schedule, (list, np.ndarray)):
raise ValueError("schedule should be list or numpy.array")
if isinstance(schedule[0], cxxjij.utility.TransverseFieldSchedule):
return schedule
# schedule validation 0 <= s <= 1
sch = np.array(schedule).T[0]
if not np.all((0 <= sch) & (sch <= 1)):
raise ValueError("schedule range is '0 <= s <= 1'.")
if len(schedule[0]) == 2:
# schedule element: (s, one_mc_step) with beta fixed
# convert to list of cxxjij.utility.TransverseFieldSchedule
cxxjij_schedule = []
for s, one_mc_step in schedule:
_schedule = cxxjij.utility.TransverseFieldSchedule()
_schedule.one_mc_step = one_mc_step
_schedule.updater_parameter.beta = beta
_schedule.updater_parameter.s = s
cxxjij_schedule.append(_schedule)
return cxxjij_schedule
elif len(schedule[0]) == 3:
# schedule element: (s, beta, one_mc_step)
# convert to list of cxxjij.utility.TransverseFieldSchedule
cxxjij_schedule = []
for s, _beta, one_mc_step in schedule:
_schedule = cxxjij.utility.TransverseFieldSchedule()
_schedule.one_mc_step = one_mc_step
_schedule.updater_parameter.beta = _beta
_schedule.updater_parameter.s = s
cxxjij_schedule.append(_schedule)
return cxxjij_schedule
else:
raise ValueError(
"""schedule is list of tuple or list
(annealing parameter s : float, step_length : int) or
(annealing parameter s : float, beta: float, step_length : int)
""")
def _get_result(self, system, model):
state, info = super()._get_result(system, model)
q_state = system.trotter_spins[:-1].T.astype(int)
c_energies = [get_state_and_energy(model, state)[1] for state in q_state]
info['trotter_state'] = q_state
info['trotter_energies'] = c_energies
return state, info
def sample(self, bqm,
beta=None, gamma=None,
num_sweeps=None, schedule=None, trotter=None,
num_reads=1,
initial_state=None, updater='single spin flip',
sparse=False,
reinitialize_state=True, seed=None):
"""Sampling from the Ising model
Args:
bqm (oj.BinaryQuadraticModel): binary quadratic model
beta (float, optional): inverse temperature.
gamma (float, optional): strength of transverse field. Defaults to None.
num_sweeps (int, optional): number of sweeps. Defaults to None.
schedule (list[list[float, int]], optional): List of annealing parameter. Defaults to None.
trotter (int): Trotter number.
num_reads (int, optional): number of sampling. Defaults to 1.
initial_state (list[int], optional): Initial state. Defaults to None.
updater (str, optional): update method. Defaults to 'single spin flip'.
reinitialize_state (bool, optional): Re-initialization at each sampling. Defaults to True.
seed (int, optional): Sampling seed. Defaults to None.
Raises:
ValueError: If the updater is unknown or the initial state size does not match the model size.
Returns:
:class:`openjij.sampler.response.Response`: results
Examples:
for Ising case::
>>> h = {0: -1, 1: -1, 2: 1, 3: 1}
>>> J = {(0, 1): -1, (3, 4): -1}
>>> sampler = oj.SQASampler()
>>> res = sampler.sample_ising(h, J)
for QUBO case::
>>> Q = {(0, 0): -1, (1, 1): -1, (2, 2): 1, (3, 3): 1, (4, 4): 1, (0, 1): -1, (3, 4): 1}
>>> sampler = oj.SQASampler()
>>> res = sampler.sample_qubo(Q)
"""
if type(bqm) == dimod.BinaryQuadraticModel:
bqm = openjij.BinaryQuadraticModel(dict(bqm.linear), dict(bqm.quadratic), bqm.offset, bqm.vartype)
ising_graph, offset = bqm.get_cxxjij_ising_graph()
self._setting_overwrite(
beta=beta, gamma=gamma,
num_sweeps=num_sweeps, num_reads=num_reads,
trotter=trotter
)
# set annealing schedule -------------------------------
self._annealing_schedule_setting(
bqm, beta, gamma, num_sweeps, schedule)
# ------------------------------- set annealing schedule
# make init state generator --------------------------------
if initial_state is None:
def init_generator(): return [ising_graph.gen_spin(seed) if seed != None else ising_graph.gen_spin()
for _ in range(self.trotter)]
else:
if isinstance(initial_state, dict):
initial_state = [initial_state[k] for k in bqm.variables]
_init_state = np.array(initial_state)
# validate initial_state size
if len(initial_state) != ising_graph.size():
raise ValueError(
"the size of the initial state should be {}"
.format(ising_graph.size()))
trotter_init_state = [_init_state
for _ in range(self.trotter)]
def init_generator(): return trotter_init_state
# -------------------------------- make init state generator
# choose updater -------------------------------------------
_updater_name = updater.lower().replace('_', '').replace(' ', '')
if _updater_name not in self._algorithm:
raise ValueError('updater is one of "single spin flip"')
algorithm = self._algorithm[_updater_name]
sqa_system = self._make_system[_updater_name](
init_generator(), ising_graph, self.gamma
)
# ------------------------------------------- choose updater
response = self._cxxjij_sampling(
bqm, init_generator,
algorithm, sqa_system,
reinitialize_state, seed
)
response.info['schedule'] = self.schedule_info
return response
def _annealing_schedule_setting(self, model,
beta=None, gamma=None,
num_sweeps=None,
schedule=None):
self.beta = beta if beta else self.beta
self.gamma = gamma if gamma else self.gamma
if schedule or self.schedule:
self._schedule = self._convert_validation_schedule(
schedule if schedule else self.schedule, self.beta
)
self.schedule_info = {'schedule': 'custom schedule'}
else:
self.num_sweeps = num_sweeps if num_sweeps else self.num_sweeps
self._schedule, beta_gamma = quartic_ising_schedule(
model=model,
beta=self._schedule_setting['beta'],
gamma=self._schedule_setting['gamma'],
num_sweeps=self._schedule_setting['num_sweeps']
)
self.schedule_info = {
'beta': beta_gamma[0],
'gamma': beta_gamma[1],
'num_sweeps': self._schedule_setting['num_sweeps']
}
def linear_ising_schedule(model, beta, gamma, num_sweeps):
"""Generate linear ising schedule.
Args:
model (:class:`openjij.model.model.BinaryQuadraticModel`): BinaryQuadraticModel
beta (float): inverse temperature
gamma (float): transverse field
num_sweeps (int): number of steps
Returns:
generated schedule
"""
schedule = cxxjij.utility.make_transverse_field_schedule_list(
beta=beta, one_mc_step=1, num_call_updater=num_sweeps
)
return schedule, [beta, gamma]
#TODO: more optimal schedule?
def quartic_ising_schedule(model, beta, gamma, num_sweeps):
"""Generate quartic ising schedule based on <NAME> and <NAME>, Journal of Mathematical Physics 49, 125210 (2008).
Args:
model (:class:`openjij.model.model.BinaryQuadraticModel`): BinaryQuadraticModel
beta (float): inverse temperature
gamma (float): transverse field
num_sweeps (int): number of steps
Returns:
generated schedule
"""
s = np.linspace(0, 1, num_sweeps)
fs = s**4*(35-84*s+70*s**2-20*s**3)
schedule = [((beta, elem), 1) for elem in fs]
return schedule, [beta, gamma]
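# Sketch of the resulting schedule (illustrative values): with num_sweeps=5 the
# annealing parameter s takes [0, 0.25, 0.5, 0.75, 1.0] and the quartic ramp
# f(s) = s**4 * (35 - 84*s + 70*s**2 - 20*s**3) rises smoothly from 0 to 1, so each
# entry is ((beta, f(s)), 1): one Monte Carlo step per updater call at fixed beta.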
``` |
{
"source": "29rou/pyqubo",
"score": 2
} |
#### File: 29rou/pyqubo/setup.py
```python
import os
import platform
import re
import subprocess
import sys
import sysconfig
from distutils.version import LooseVersion
from importlib.util import find_spec
from setuptools import setup, Command, Extension
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
class PackageInfo(object):
def __init__(self, info_file):
with open(info_file) as f:
exec(f.read(), self.__dict__)
self.__dict__.pop('__builtins__', None)
def __getattribute__(self, name):
return super(PackageInfo, self).__getattribute__(name)
package_info = PackageInfo(os.path.join('pyqubo', 'package_info.py'))
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.16':
raise RuntimeError("CMake >= 3.16 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cfg = 'Debug' if self.debug else 'Release'
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
"-DCMAKE_BUILD_TYPE={}".format(cfg), # not used on MSVC, but no harm
]
build_args = []
if platform.system() != "Windows":
# Using Ninja-build since it a) is available as a wheel and b)
# multithreads automatically. MSVC would require all variables be
# exported for Ninja to pick it up, which is a little tricky to do.
# Users can override the generator with CMAKE_GENERATOR in CMake
# 3.15+.
if not cmake_generator:
try:
import ninja # noqa: F401
cmake_args += ["-GNinja"]
except ImportError:
pass
else:
# Single config generators are handled "normally"
single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
# CMake allows an arch-in-generator style for backward compatibility
contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
# Specify the arch if using MSVC generator, but only if it doesn't
# contain a backward-compatibility arch spec already in the
# generator name.
if not single_config and not contains_arch:
cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
# Multi-config generators have a different way to specify configs
if not single_config:
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
]
build_args += ["--config", cfg]
# disable macOS OpenMP by default since an additional dependency is needed.
if platform.system() == 'Darwin':
if not {'True': True, 'False': False}[os.getenv('USE_OMP', 'False')]:
print("USE_OMP=No")
cmake_args += ['-DUSE_OMP=No']
else:
print("USE_OMP=Yes")
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
if archs:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += ["-j{}".format(self.parallel)]
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
class CppTest(Command):
def initialize_options(self):
self.cpplibdir = self.distutils_dir_name()
def finalize_options(self):
pass
user_options = []
def distutils_dir_name(self):
"""Returns the name of a distutils build directory"""
f = "temp.{platform}-{version[0]}.{version[1]}"
return f.format(platform=sysconfig.get_platform(),
version=sys.version_info)
def run(self):
subprocess.call(['make pyqubo_test'],
cwd=os.path.join('build', self.cpplibdir), shell=True)
subprocess.call(['./tests/pyqubo_test'],
cwd=os.path.join('build', self.cpplibdir), shell=True)
packages = ['pyqubo', 'pyqubo.integer', 'pyqubo.utils']
install_requires = [
"typing-extensions; python_version < '3.8'",
'numpy>=1.17.3',
"dimod>=0.10.0, <0.11",
'dwave-neal>=0.5.7',
'Deprecated>=1.2.12',
'six>=1.15.0'
]
tests_require = [
'coverage>=4.5.1',
'codecov>=2.1.9',
]
python_requires = '>=3.7, <=3.10'
setup(
name=package_info.__package_name__,
version=package_info.__version__,
description=package_info.__description__,
long_description=open('README.rst').read(),
author=package_info.__contact_names__,
author_email=package_info.__contact_emails__,
maintainer=package_info.__contact_names__,
maintainer_email=package_info.__contact_emails__,
url=package_info.__repository_url__,
download_url=package_info.__download_url__,
license=package_info.__license__,
ext_modules=[CMakeExtension('cpp_pyqubo')],
cmdclass=dict(build_ext=CMakeBuild, cpp_test=CppTest),
zip_safe=False,
packages=packages,
keywords=package_info.__keywords__,
install_requires=install_requires,
python_requires=python_requires,
tests_require=tests_require,
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: POSIX :: Linux',
]
)
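# Build/test sketch (standard setuptools flow; commands assumed, not project docs):
#   pip install .              # invokes CMakeBuild to compile the cpp_pyqubo extension
#   python setup.py cpp_test   # runs the C++ test target registered via cmdclass above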
``` |
{
"source": "29Takuya/dash-docset-optuna",
"score": 2
} |
#### File: visualization/generated/optuna-visualization-plot_slice-1.py
```python
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x ** 2 + y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
fig = optuna.visualization.plot_slice(study, params=["x", "y"])
fig.show()
``` |
{
"source": "2a5A1Ghu1/Phore",
"score": 2
} |
#### File: test/functional/test_case_base.py
```python
from test.functional.test_framework import BitcoinTestFramework
class TestCaseBase(BitcoinTestFramework) :
def set_test_params(self) :
pass
def run_test(self) :
key_list = dir(self)
for name in key_list :
if name.startswith("initialize") :
print('Initialize test case:', self.__class__.__name__ + '.' + name)
getattr(self, name)()
for name in key_list :
if name.startswith("test_") :
print('Test case:', self.__class__.__name__ + '.' + name)
getattr(self, name)()
for name in key_list :
if name.startswith("finalize") :
print('Finalize test case:', self.__class__.__name__ + '.' + name)
getattr(self, name)()
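# Usage sketch (hypothetical subclass): methods are discovered purely by name prefix,
# so a concrete test case could look like
#   class ExampleTest(TestCaseBase):
#       def initialize_chain(self): ...   # runs first
#       def test_something(self): ...     # runs after all initialize* methods
#       def finalize_cleanup(self): ...   # runs last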
``` |
{
"source": "2adityap/stock-screener",
"score": 3
} |
#### File: 2adityap/stock-screener/prediction.py
```python
import pandas as pd
import pandas_datareader as web
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
import math
import matplotlib.pyplot as mtlplt
import yfinance as yf
def create_df(symbol, start, end):
data = yf.download(symbol, start = start, end = end)
data_frame = pd.DataFrame(data)
data_frame.to_csv('stockdata.csv',index = "Date")
df = pd.read_csv('stockdata.csv')
return df
def graph(dataframe):
mtlplt.figure(figsize=(20,9))
mtlplt.title("Closing Data")
mtlplt.plot(dataframe["Close"])
mtlplt.xticks(range(0,dataframe.shape[0],500),dataframe["Date"].loc[::500],rotation=45)
mtlplt.xlabel('Date', fontsize=20)
mtlplt.ylabel('Close price in $(USD)',fontsize=20)
mtlplt.show()
def feature_scaling(dataset):
scale = MinMaxScaler(feature_range=(0,1))  # scales features between 0 and 1
scaled_data = scale.fit_transform(dataset)
return scaled_data
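# Example (sketch): feature_scaling(np.array([[10.0], [20.0], [30.0]])) maps the
# column into [0, 1], giving approximately [[0.0], [0.5], [1.0]].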
def train_close_prices(dataframe):
close_data = dataframe.filter(["Close"])
close_dataset = close_data.values #convert to array
training_length = math.ceil(len(close_dataset)*.8) #80:20 ratio applied
scale = MinMaxScaler(feature_range=(0,1))  # scales features between 0 and 1
scaled_data = scale.fit_transform(close_dataset)
training_data = scaled_data[0:training_length, :]
Xtrain = []
Ytrain = []
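# Build sliding 60-day windows: each training sample is the previous 60 scaled closing
# prices and the target is the (scaled) close of the following day.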
for i in range(60, len(training_data)):
Xtrain.append(training_data[i-60:i])
Ytrain.append(training_data[i])
Xtrain = np.array(Xtrain)
Ytrain = np.array(Ytrain)
Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1],1))
model = Sequential()
neurons = 50
model.add(LSTM(neurons, return_sequences=True, input_shape=(Xtrain.shape[1],1)))
model.add(LSTM(neurons, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
history_data = model.fit(Xtrain, Ytrain, batch_size=50, epochs=200, verbose=2, validation_split=0.2)
graph_convergence(history_data)
testing_data = scaled_data[training_length-60:,:]
Xtest = []
Ytest = close_dataset[training_length:, :]
for i in range(60, len(testing_data)):
Xtest.append(testing_data[i-60:i])
Xtest = np.array(Xtest)
Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1],1))
predictions = model.predict(Xtest)
predictions = scale.inverse_transform(predictions)
training = close_data[:training_length]
validation = close_data[training_length:]
validation['Predictions'] = predictions
graph_algorithm_training(training, validation, dataframe)
print(dataframe)
predict_next_day(model, dataframe, scale)
def graph_convergence(history_data):
mtlplt.figure(figsize=(20,10))
mtlplt.title('Training validation loss')
mtlplt.plot(history_data.history['loss'])
mtlplt.plot(history_data.history['val_loss'])
mtlplt.ylabel('Training loss')
mtlplt.xlabel('epochs')
mtlplt.legend(['train' , 'validation'], loc = 'upper left')
mtlplt.show()
def graph_algorithm_training(training, validation, dataframe):
## Visualize training, validation and predicted values in a graph
mtlplt.figure(figsize=(20,10))
mtlplt.title('Trained Model')
mtlplt.xticks(range(0,dataframe.shape[0],500),dataframe['Date'].loc[::500],rotation=45)
mtlplt.xlabel('Date', fontsize=20)
mtlplt.ylabel('Close Stock Price $ (USD)', fontsize=20)
mtlplt.plot(training['Close'])
mtlplt.plot(validation[['Close', 'Predictions']])
mtlplt.legend(['Training', 'Validation', 'Predictions'], loc='lower right')
mtlplt.show()
def predict_next_day(model, dataframe, scale):
df = dataframe.filter(["Close"])
last60days = df[-60:].values
last60days_scaled = scale.transform(last60days)
X_test = []
X_test.append(last60days_scaled)
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_price = model.predict(X_test)
predicted_price = scale.inverse_transform(predicted_price)
print(predicted_price)
def main():
#Something to do with start price that affects wrong output
df = create_df("NVDA", "2013-01-01", "2021-01-04")
train_close_prices(df)
if __name__ == "__main__":
main()
``` |
{
"source": "2AiBAIT/StoneRecog",
"score": 3
} |
#### File: 2AiBAIT/StoneRecog/model_old.py
```python
import tensorflow as tf
from tensorflow.python.keras import Model
from tensorflow.python.keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, Dropout, Flatten, Concatenate, Reshape, Activation
from tensorflow.python.keras.regularizers import l2
from lrn import LRN
class jbdm_v1(object):
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
putin = Input(shape=input_size)
conv1_7x7_s2 = Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same', activation='relu',
name='conv1/7x7_s2', kernel_regularizer=l2(0.0002))(putin)
pool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name='pool1/3x3_s2')(conv1_7x7_s2)
pool1_norm1 = LRN(name='pool1/norm1')(pool1_3x3_s2)
conv2_3x3_reduce = Conv2D(64, kernel_size=(1, 1), padding='valid', activation='relu', name='conv2/3x3_reduce',
kernel_regularizer=l2(0.0002))(pool1_norm1)
conv2_3x3 = Conv2D(192, kernel_size=(3, 3), padding='same', activation='relu', name='conv2/3x3',
kernel_regularizer=l2(0.0002))(conv2_3x3_reduce)
conv2_norm2 = LRN(name='conv2/norm2')(conv2_3x3)
pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name='pool2/3x3_s2')(conv2_norm2)
inception_3a_1x1 = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu', name='inception_3a/1x1',
kernel_regularizer=l2(0.0002))(pool2_3x3_s2)
inception_3a_3x3_reduce = Conv2D(96, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_3a/3x3_reduce', kernel_regularizer=l2(0.0002))(pool2_3x3_s2)
inception_3a_3x3 = Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu', name='inception_3a/3x3',
kernel_regularizer=l2(0.0002))(inception_3a_3x3_reduce)
inception_3a_5x5_reduce = Conv2D(16, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_3a/5x5_reduce', kernel_regularizer=l2(0.0002))(pool2_3x3_s2)
inception_3a_5x5 = Conv2D(32, kernel_size=(5, 5), padding='same', activation='relu', name='inception_3a/5x5',
kernel_regularizer=l2(0.0002))(inception_3a_5x5_reduce)
inception_3a_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_3a/pool')(
pool2_3x3_s2)
inception_3a_pool_proj = Conv2D(32, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_3a/pool_proj', kernel_regularizer=l2(0.0002))(inception_3a_pool)
inception_3a_output = Concatenate(axis=-1, name='inception_3a/output')(
[inception_3a_1x1, inception_3a_3x3, inception_3a_5x5, inception_3a_pool_proj])
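# Each inception block concatenates four parallel branches (1x1 conv, 3x3 conv, 5x5 conv
# and a max-pooled 1x1 projection) along the channel axis; the same pattern repeats below
# with progressively wider filter counts, mirroring GoogLeNet.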
inception_3b_1x1 = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu', name='inception_3b/1x1',
kernel_regularizer=l2(0.0002))(inception_3a_output)
inception_3b_3x3_reduce = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_3b/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_3a_output)
inception_3b_3x3 = Conv2D(192, kernel_size=(3, 3), padding='same', activation='relu', name='inception_3b/3x3',
kernel_regularizer=l2(0.0002))(inception_3b_3x3_reduce)
inception_3b_5x5_reduce = Conv2D(32, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_3b/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_3a_output)
inception_3b_5x5 = Conv2D(96, kernel_size=(5, 5), padding='same', activation='relu', name='inception_3b/5x5',
kernel_regularizer=l2(0.0002))(inception_3b_5x5_reduce)
inception_3b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_3b/pool')(
inception_3a_output)
inception_3b_pool_proj = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_3b/pool_proj', kernel_regularizer=l2(0.0002))(inception_3b_pool)
inception_3b_output = Concatenate(axis=-1, name='inception_3b/output')(
[inception_3b_1x1, inception_3b_3x3, inception_3b_5x5, inception_3b_pool_proj])
inception_4a_1x1 = Conv2D(192, kernel_size=(1, 1), padding='same', activation='relu', name='inception_4a/1x1',
kernel_regularizer=l2(0.0002))(inception_3b_output)
inception_4a_3x3_reduce = Conv2D(96, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4a/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_3b_output)
inception_4a_3x3 = Conv2D(208, kernel_size=(3, 3), padding='same', activation='relu', name='inception_4a/3x3',
kernel_regularizer=l2(0.0002))(inception_4a_3x3_reduce)
inception_4a_5x5_reduce = Conv2D(16, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4a/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_3b_output)
inception_4a_5x5 = Conv2D(48, kernel_size=(5, 5), padding='same', activation='relu', name='inception_4a/5x5',
kernel_regularizer=l2(0.0002))(inception_4a_5x5_reduce)
inception_4a_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_4a/pool')(
inception_3b_output)
inception_4a_pool_proj = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4a/pool_proj', kernel_regularizer=l2(0.0002))(inception_4a_pool)
inception_4a_output = Concatenate(axis=-1, name='inception_4a/output')(
[inception_4a_1x1, inception_4a_3x3, inception_4a_5x5, inception_4a_pool_proj])
loss1_ave_pool = AveragePooling2D(pool_size=(5, 5), strides=(3, 3), name='loss1/ave_pool')(inception_4a_output)
loss1_conv = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu', name='loss1/conv',
kernel_regularizer=l2(0.0002))(loss1_ave_pool)
loss1_fc = Dense(1024, activation='relu', name='loss1/fc', kernel_regularizer=l2(0.0002))(loss1_conv)
loss1_drop_fc = Dropout(rate=0.7)(loss1_fc)
loss1_flatten = Flatten()(loss1_drop_fc)
loss1_classifier = Dense(num_class, name='loss1/classifier', kernel_regularizer=l2(0.0002))(loss1_flatten)
loss1_classifier_act = Activation('softmax')(loss1_classifier)
inception_4b_1x1 = Conv2D(160, kernel_size=(1, 1), padding='same', activation='relu', name='inception_4b/1x1',
kernel_regularizer=l2(0.0002))(inception_4a_output)
inception_4b_3x3_reduce = Conv2D(112, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4b/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_4a_output)
inception_4b_3x3 = Conv2D(224, kernel_size=(3, 3), padding='same', activation='relu', name='inception_4b/3x3',
kernel_regularizer=l2(0.0002))(inception_4b_3x3_reduce)
inception_4b_5x5_reduce = Conv2D(24, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4b/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_4a_output)
inception_4b_5x5 = Conv2D(64, kernel_size=(5, 5), padding='same', activation='relu', name='inception_4b/5x5',
kernel_regularizer=l2(0.0002))(inception_4b_5x5_reduce)
inception_4b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_4b/pool')(
inception_4a_output)
inception_4b_pool_proj = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4b/pool_proj', kernel_regularizer=l2(0.0002))(inception_4b_pool)
inception_4b_output = Concatenate(axis=-1, name='inception_4b/output')(
[inception_4b_1x1, inception_4b_3x3, inception_4b_5x5, inception_4b_pool_proj])
inception_4c_1x1 = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu', name='inception_4c/1x1',
kernel_regularizer=l2(0.0002))(inception_4b_output)
inception_4c_3x3_reduce = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4c/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_4b_output)
inception_4c_3x3 = Conv2D(256, kernel_size=(3, 3), padding='same', activation='relu', name='inception_4c/3x3',
kernel_regularizer=l2(0.0002))(inception_4c_3x3_reduce)
inception_4c_5x5_reduce = Conv2D(24, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4c/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_4b_output)
inception_4c_5x5 = Conv2D(64, kernel_size=(5, 5), padding='same', activation='relu', name='inception_4c/5x5',
kernel_regularizer=l2(0.0002))(inception_4c_5x5_reduce)
inception_4c_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_4c/pool')(
inception_4b_output)
inception_4c_pool_proj = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4c/pool_proj', kernel_regularizer=l2(0.0002))(inception_4c_pool)
inception_4c_output = Concatenate(axis=-1, name='inception_4c/output')(
[inception_4c_1x1, inception_4c_3x3, inception_4c_5x5, inception_4c_pool_proj])
inception_4d_1x1 = Conv2D(112, kernel_size=(1, 1), padding='same', activation='relu', name='inception_4d/1x1',
kernel_regularizer=l2(0.0002))(inception_4c_output)
inception_4d_3x3_reduce = Conv2D(144, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4d/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_4c_output)
inception_4d_3x3 = Conv2D(288, kernel_size=(3, 3), padding='same', activation='relu', name='inception_4d/3x3',
kernel_regularizer=l2(0.0002))(inception_4d_3x3_reduce)
inception_4d_5x5_reduce = Conv2D(32, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4d/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_4c_output)
inception_4d_5x5 = Conv2D(64, kernel_size=(5, 5), padding='same', activation='relu', name='inception_4d/5x5',
kernel_regularizer=l2(0.0002))(inception_4d_5x5_reduce)
inception_4d_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_4d/pool')(
inception_4c_output)
inception_4d_pool_proj = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4d/pool_proj', kernel_regularizer=l2(0.0002))(inception_4d_pool)
inception_4d_output = Concatenate(axis=-1, name='inception_4d/output')(
[inception_4d_1x1, inception_4d_3x3, inception_4d_5x5, inception_4d_pool_proj])
loss2_ave_pool = AveragePooling2D(pool_size=(5, 5), strides=(3, 3), name='loss2/ave_pool')(inception_4d_output)
loss2_conv = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu', name='loss2/conv',
kernel_regularizer=l2(0.0002))(loss2_ave_pool)
loss2_fc = Dense(1024, activation='relu', name='loss2/fc', kernel_regularizer=l2(0.0002))(loss2_conv)
loss2_drop_fc = Dropout(rate=0.7)(loss2_fc)
loss2_flatten = Flatten()(loss2_drop_fc)
loss2_classifier = Dense(num_class, name='loss2/classifier', kernel_regularizer=l2(0.0002))(loss2_flatten)
loss2_classifier_act = Activation('softmax')(loss2_classifier)
inception_4e_1x1 = Conv2D(256, kernel_size=(1, 1), padding='same', activation='relu', name='inception_4e/1x1',
kernel_regularizer=l2(0.0002))(inception_4d_output)
inception_4e_3x3_reduce = Conv2D(160, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4e/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_4d_output)
inception_4e_3x3 = Conv2D(320, kernel_size=(3, 3), padding='same', activation='relu', name='inception_4e/3x3',
kernel_regularizer=l2(0.0002))(inception_4e_3x3_reduce)
inception_4e_5x5_reduce = Conv2D(32, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4e/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_4d_output)
inception_4e_5x5 = Conv2D(128, kernel_size=(5, 5), padding='same', activation='relu', name='inception_4e/5x5',
kernel_regularizer=l2(0.0002))(inception_4e_5x5_reduce)
inception_4e_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_4e/pool')(
inception_4d_output)
inception_4e_pool_proj = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_4e/pool_proj', kernel_regularizer=l2(0.0002))(inception_4e_pool)
inception_4e_output = Concatenate(axis=-1, name='inception_4e/output')(
[inception_4e_1x1, inception_4e_3x3, inception_4e_5x5, inception_4e_pool_proj])
inception_5a_1x1 = Conv2D(256, kernel_size=(1, 1), padding='same', activation='relu', name='inception_5a/1x1',
kernel_regularizer=l2(0.0002))(inception_4e_output)
inception_5a_3x3_reduce = Conv2D(160, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_5a/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_4e_output)
inception_5a_3x3 = Conv2D(320, kernel_size=(3, 3), padding='same', activation='relu', name='inception_5a/3x3',
kernel_regularizer=l2(0.0002))(inception_5a_3x3_reduce)
inception_5a_5x5_reduce = Conv2D(32, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_5a/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_4e_output)
inception_5a_5x5 = Conv2D(128, kernel_size=(5, 5), padding='same', activation='relu', name='inception_5a/5x5',
kernel_regularizer=l2(0.0002))(inception_5a_5x5_reduce)
inception_5a_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_5a/pool')(
inception_4e_output)
inception_5a_pool_proj = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_5a/pool_proj', kernel_regularizer=l2(0.0002))(inception_5a_pool)
inception_5a_output = Concatenate(axis=-1, name='inception_5a/output')(
[inception_5a_1x1, inception_5a_3x3, inception_5a_5x5, inception_5a_pool_proj])
inception_5b_1x1 = Conv2D(384, kernel_size=(1, 1), padding='same', activation='relu', name='inception_5b/1x1',
kernel_regularizer=l2(0.0002))(inception_5a_output)
inception_5b_3x3_reduce = Conv2D(192, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_5b/3x3_reduce', kernel_regularizer=l2(0.0002))(
inception_5a_output)
inception_5b_3x3 = Conv2D(384, kernel_size=(3, 3), padding='same', activation='relu', name='inception_5b/3x3',
kernel_regularizer=l2(0.0002))(inception_5b_3x3_reduce)
inception_5b_5x5_reduce = Conv2D(48, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_5b/5x5_reduce', kernel_regularizer=l2(0.0002))(
inception_5a_output)
inception_5b_5x5 = Conv2D(128, kernel_size=(5, 5), padding='same', activation='relu', name='inception_5b/5x5',
kernel_regularizer=l2(0.0002))(inception_5b_5x5_reduce)
inception_5b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='inception_5b/pool')(
inception_5a_output)
inception_5b_pool_proj = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu',
name='inception_5b/pool_proj', kernel_regularizer=l2(0.0002))(inception_5b_pool)
inception_5b_output = Concatenate(axis=-1, name='inception_5b/output')(
[inception_5b_1x1, inception_5b_3x3, inception_5b_5x5, inception_5b_pool_proj])
        pool5_7x7_s1 = AveragePooling2D(pool_size=(7, 7), strides=(1, 1), name='pool5/7x7_s1')(inception_5b_output)
pool5_drop_7x7_s1 = Dropout(rate=0.4)(pool5_7x7_s1)
loss3_flatten = Flatten()(pool5_drop_7x7_s1)
loss3_classifier = Dense(num_class, name='loss3/classifier', kernel_regularizer=l2(0.0002))(loss3_flatten)
loss3_classifier_act = Activation('softmax', name='prob')(loss3_classifier)
model = Model(inputs=putin, outputs=[loss1_classifier_act, loss2_classifier_act, loss3_classifier_act])
# model = Model(inputs=putin, outputs=[loss1_classifier_act])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(model.summary())
if pretrained_weights:
model.load_weights(pretrained_weights)
return model
class jbdm_v2():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.AveragePooling2D(pool_size=(4, 4))(base_output)
base_output = tf.keras.layers.Flatten(name="flatten")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_05():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.Flatten(name="flatten")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_06():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.Flatten(name="flatten")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_1():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.Flatten(name="flatten")(base_output)
base_output = tf.keras.layers.Dense(2048, activation="relu")(base_output)
base_output = tf.keras.layers.Dropout(0.5)(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_2():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.Flatten(name="flatten")(base_output)
base_output = tf.keras.layers.Dropout(0.5)(base_output)
base_output = tf.keras.layers.Dense(2048, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_25():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
# baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet')
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dropout(0.5)(base_output)
base_output = tf.keras.layers.Dense(256, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_26():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dropout(0.5)(base_output)
base_output = tf.keras.layers.Dense(256, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_27():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dropout(0.25)(base_output)
base_output = tf.keras.layers.Dense(256, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_28():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dense(256, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_29():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dense(256, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_3():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2()
# baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
# include_top=False,
# input_tensor=Input(shape=input_size)
# )
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_31():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dense(128, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v2_33():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None):
baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
base_output = tf.keras.layers.Dense(1024, activation="relu")(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
class jbdm_v3_1():
def build(num_class, input_size=(128, 128, 3), pretrained_weights=None, lr=1e-3):
# baseModel = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(weights='imagenet')
baseModel = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(weights='imagenet',
include_top=False,
input_tensor=Input(shape=input_size)
)
print("Base Model summary")
print(baseModel.summary())
baseModel.trainable = False
# base_output = baseModel.layers[-2].output # layer number obtained from model summary above
base_output = baseModel.output
base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
new_model = tf.keras.models.Model(inputs=baseModel.inputs,
outputs=new_output)
new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
print("Model summary")
print(new_model.summary())
if pretrained_weights:
new_model.load_weights(pretrained_weights)
return new_model
```
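For context, here is a minimal, hypothetical training driver for the transfer-learning builders defined above. It is not part of the original repo: the data directory, class count, and epoch budget are placeholder assumptions, and `jbdm_v2_26` is just one of the variants shown.

```python
# Hypothetical usage sketch: assumes it runs in the same module as the builder
# classes above; paths and hyperparameters are placeholders, not project settings.
import tensorflow as tf

NUM_CLASSES = 5              # assumption: number of target classes
INPUT_SIZE = (128, 128, 3)   # matches the builders' default input_size

model = jbdm_v2_26.build(NUM_CLASSES, input_size=INPUT_SIZE)

datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255,
                                                          validation_split=0.2)
train_gen = datagen.flow_from_directory('data/train', target_size=INPUT_SIZE[:2],
                                        class_mode='categorical', subset='training')
val_gen = datagen.flow_from_directory('data/train', target_size=INPUT_SIZE[:2],
                                      class_mode='categorical', subset='validation')

model.fit(train_gen, validation_data=val_gen, epochs=10)
model.save_weights('jbdm_v2_26_weights.h5')
```

Because the MobileNetV2 base is frozen (`baseModel.trainable = False`), only the new classification head is trained, which is why the variants above differ mainly in the head's pooling, dropout, and dense-layer sizes.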
#### File: 2AiBAIT/StoneRecog/webscraper.py
```python
from bs4 import BeautifulSoup
import imagesize
import requests
import json
def my_range(start, end, step):
while start <= end:
yield start
start += step
'''TXT Variables'''
na = "N/A"
datasetPath = "D:/Pedras/"
'''
VALUES VARIABLES
rockname - <NAME>
rock_class - Rock class
color - Rock color
place1 - Rock location (region)
place2 - Rock location (country)
trade - Trade name
pq - Polish quality
pd - Polish durability
sff - Suitable for flooring
ar - Acid resistance
bulk_value - Bulk density
pres_value - Pressure resistance
poro_value - Porosity
fros_value - Frost resistance
insk1 -
insk2 -
'''
def get_rock_details():
global width, height
for num in my_range(1, 6556, 1):
print("Pedra:", str(num), "/", str(6556))
url = requests.get('https://www.naturalstone-online.com/index.php?id=356&user_dnsaeng_pi1[steinID]=%d' % num).text
soup = BeautifulSoup(url, 'lxml')
summary = soup.find('div', class_='detailansicht textcolumns')
if summary.find('img', class_='stein_bild_big'):
if summary.find('img', class_='stein_bild_big').attrs['src']:
img_link = summary.find('img', class_='stein_bild_big').attrs['src']
file_path = (datasetPath + '%d.jpg' % num)
imageURL='https://www.naturalstone-online.com' + img_link
img_data = requests.get(imageURL).content
with open(file_path, 'wb') as handler:
handler.write(img_data)
# Get image dimensions width and height
width, height = imagesize.get(file_path)
#print(width, height)
'''Name of the Rock'''
if summary.find('div', class_='box_detailheader').h2:
rock_name = summary.find('div', class_='box_detailheader').h2.text
#print(rock_name)
else:
rock_name = na
#print(na)
'''Rock Class Name'''
if summary.find('div', class_='sub_img').h3:
rock_class = summary.find('div', class_='sub_img').h3.text
#print(rock_class)
else:
rock_class = na
#print(na)
'''Basic Properties'''
if summary.find('strong', text='Coloring:'):
color = summary.find('div', class_='detail_info').span.text
#print(color)
else:
color = na
#print(na)
            # Country and <NAME> of the rock
place1 = na
place2 = na
location = summary.find('strong', text='Location where found:')
if location:
place1 = location.next_sibling.next_sibling
location_country = location.next_sibling.next_sibling.next_sibling.next_sibling
if location_country is not None and location_country.name != 'unknown':
place2 = location_country
if summary.find('strong', text='Trading name:'):
trade = summary.find('strong', text='Trading name:').next_element.next_element.next_element
#print(trade)
else:
trade = na
#print(na)
'''Rock Technical Properties'''
if summary.find('strong', text='Polish quality:'):
pq = summary.find('strong', text='Polish quality:').next_element.next_element.next_element
#print(pq)
else:
pq = na
#print(na)
if summary.find('strong', text='Polish durability:'):
pd = summary.find('strong', text='Polish durability:').next_element.next_element.next_element
#print(pd)
else:
pd = na
#print(na)
if summary.find('strong', text='Suitable for flooring:'):
sff = summary.find('strong', text='Suitable for flooring:').next_element.next_element.next_element
#print(sff)
else:
sff = na
#print(na)
if summary.find('strong', text='Acid resistant:'):
ar = summary.find('strong', text='Acid resistant:').next_element.next_element.next_element
#print(ar)
else:
ar = na
#print(na)
# if summary.find('strong', text='Frost resistant:'):
# frfr = summary.find('strong', text='Frost resistant:').next_element.next_element.next_element
# print(fr + frfr + "\n")
# else:
# print(fr + na + "\n")
'''Rock Specifications'''
if summary.find('strong', text='Bulk density:'):
bulk_value = summary.find('strong', text='Bulk density:').parent.next_sibling.text
#print(bulk_value)
else:
bulk_value = na
#print(na)
if summary.find('strong', text='Pressure resistance:'):
pres_value = summary.find('strong', text='Pressure resistance:').parent.next_sibling.text
#print(pres_value)
else:
pres_value = na
#print(na)
if summary.find('strong', text='Porosity:'):
poro_value = summary.find('strong', text='Porosity:').parent.next_sibling.text
#print(poro_value)
else:
poro_value = na
#print(na)
if summary.find('strong', text='Frost resistant:'):
fros_value = summary.find('strong', text='Frost resistant:').next_sibling.next_sibling
#print(fros_value)
else:
fros_value = na
#print(na)
'''INSK'''
if summary.find('strong', text='INSK Nummer:'):
insk1 = summary.find('strong', text='INSK Nummer:').parent.next_sibling.text
#print(insk1)
else:
insk1 = na
#print(na)
if summary.find('strong', text='INSK Nummer alt:'):
insk2 = summary.find('strong', text='INSK Nummer alt:').parent.next_sibling.text
#print(insk2)
else:
insk2 = na
#print(na)
convert_json(num, rock_name, rock_class, color, place1, place2, trade, pq, pd, sff, ar, bulk_value,
pres_value, poro_value, fros_value, insk1, insk2, na, width, height, file_path, imageURL)
with open('rocks_db.json', 'w') as outfile:
json.dump(rocksArr, outfile, indent=2)
rocksArr = []
def convert_json(num, rock_name, rock_class, color, place1, place2, trade, pq, pd, sff, ar, bulk_value, pres_value,
poro_value, fros_value, insk1, insk2, na, width, height, file_path, url):
rocks_object = {
"ID": num,
"Classe": rock_class,
"Diretorio Img": file_path,
"Nome da Pedra": rock_name,
"Largura da Imagem": width,
"Altura da Imagem": height,
"Cor": color,
"Regiao": place1.strip(),
"Pais": place2.strip(),
"Nome de comercio": trade.strip(),
"Qualidade Polimento": pq,
"Durabilidade Polimento": pd,
"Pavimentos": sff,
"Resistencia ao acido": ar,
"Densidade aparente": bulk_value,
"Resistencia a pressao": pres_value,
"Porosidade": poro_value,
"Resistencia ao frio": fros_value,
"INSK": insk1,
"INSK alt": insk2,
"URL": url,
}
rocksArr.append(rocks_object)
get_rock_details()
``` |
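For reference, each pass through `convert_json` appends one record of the shape below to `rocksArr` before the list is dumped to `rocks_db.json`. The values here are placeholders of my own, not real scraped data; the keys are kept exactly as they appear in the code (in Portuguese).

```python
# Illustrative shape of a single rocks_db.json entry (placeholder values only).
example_record = {
    "ID": 1,
    "Classe": "Granite",
    "Diretorio Img": "D:/Pedras/1.jpg",
    "Nome da Pedra": "Example Stone",
    "Largura da Imagem": 800,
    "Altura da Imagem": 600,
    "Cor": "grey",
    "Regiao": "N/A",
    "Pais": "N/A",
    "Nome de comercio": "N/A",
    "Qualidade Polimento": "good",
    "Durabilidade Polimento": "good",
    "Pavimentos": "yes",
    "Resistencia ao acido": "no",
    "Densidade aparente": "2.6",
    "Resistencia a pressao": "160",
    "Porosidade": "0.5",
    "Resistencia ao frio": "yes",
    "INSK": "N/A",
    "INSK alt": "N/A",
    "URL": "https://www.naturalstone-online.com/path/to/image.jpg",  # image URL built in get_rock_details
}
```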
{
"source": "2alin/openCV-Python-demos",
"score": 3
} |
#### File: openCV-Python-demos/paint-brush-trackbars/paint-brush.py
```python
import numpy as np
import cv2 as cv
drawing = False # true if mouse is pressed
px, py = -1, -1
# link two consecutive mouse positions with a thick anti-aliased line
def link_points(px,py,x,y):
cv.line(img,(px,py),(x,y),(b,g,r),2*width,cv.LINE_AA)
# mouse callback function
def draw_circle(event,x,y,flags,param):
global px,py, drawing
if width == 0:# avoid console errors
return
if event == cv.EVENT_LBUTTONDOWN:
drawing = True
px, py = x, y
elif event == cv.EVENT_MOUSEMOVE:
if drawing:
link_points(px,py,x,y)
cv.circle(img,(x,y),width,(b,g,r),-1,cv.LINE_AA)
px, py = x, y
elif event == cv.EVENT_LBUTTONUP:
drawing = False
cv.circle(img,(x,y),width,(b,g,r),-1,cv.LINE_AA)
def nothing(x):
pass
# Create a black image, a window
img = np.zeros((500,800,3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image',draw_circle)
#create trackbars for color change
cv.createTrackbar('R','image',10,255,nothing)
cv.createTrackbar('G','image',100,255,nothing)
cv.createTrackbar('B','image',200,255,nothing)
#create trackbar for brush radius
cv.createTrackbar('Width','image',4,50,nothing)
while(True):
cv.imshow('image',img)
k = cv.waitKey(1) & 0xFF
if k == 27:
break
elif k == ord('c'): # clear screen
img = np.zeros((500,800,3), np.uint8)
# get current positions of all trackbars
r = cv.getTrackbarPos('R', 'image')
g = cv.getTrackbarPos('G', 'image')
b = cv.getTrackbarPos('B', 'image')
width = cv.getTrackbarPos('Width', 'image')
cv.destroyAllWindows()
``` |
{
"source": "2altoids/rdt-assignment",
"score": 3
} |
#### File: 2altoids/rdt-assignment/RDT_2_1.py
```python
import Network_2_1 as Network
import argparse
from time import sleep
import hashlib
class Packet:
## the number of bytes used to store packet length
seq_num_S_length = 10
length_S_length = 10
## length of md5 checksum in hex
checksum_length = 32
def __init__(self, seq_num, msg_S):
self.seq_num = seq_num
self.msg_S = msg_S
def get_seq_num(self):
return self.seq_num
@classmethod
def from_byte_S(self, byte_S):
if Packet.corrupt(byte_S):
raise RuntimeError('Cannot initialize Packet: byte_S is corrupt')
# extract the fields
seq_num = int(byte_S[Packet.length_S_length : Packet.length_S_length+Packet.seq_num_S_length])
msg_S = byte_S[Packet.length_S_length+Packet.seq_num_S_length+Packet.checksum_length :]
return self(seq_num, msg_S)
def get_byte_S(self):
#convert sequence number of a byte field of seq_num_S_length bytes
seq_num_S = str(self.seq_num).zfill(self.seq_num_S_length)
#convert length to a byte field of length_S_length bytes
length_S = str(self.length_S_length + len(seq_num_S) + self.checksum_length + len(self.msg_S)).zfill(self.length_S_length)
#compute the checksum
checksum = hashlib.md5((length_S+seq_num_S+self.msg_S).encode('utf-8'))
checksum_S = checksum.hexdigest()
#compile into a string
return length_S + seq_num_S + checksum_S + self.msg_S
@staticmethod
def corrupt(byte_S):
#extract the fields
length_S = byte_S[0:Packet.length_S_length]
        seq_num_S = byte_S[Packet.length_S_length : Packet.length_S_length+Packet.seq_num_S_length]
        checksum_S = byte_S[Packet.length_S_length+Packet.seq_num_S_length : Packet.length_S_length+Packet.seq_num_S_length+Packet.checksum_length]
        msg_S = byte_S[Packet.length_S_length+Packet.seq_num_S_length+Packet.checksum_length :]
#compute the checksum locally
checksum = hashlib.md5(str(length_S+seq_num_S+msg_S).encode('utf-8'))
computed_checksum_S = checksum.hexdigest()
#and check if the same
return checksum_S != computed_checksum_S
class RDT:
## latest sequence number used in a packet
seq_num = 1
## buffer of bytes read from network
byte_buffer = ''
def __init__(self, role_S, server_S, port):
self.network = Network.NetworkLayer(role_S, server_S, port)
def disconnect(self):
self.network.disconnect()
def rdt_1_0_send(self, msg_S):
pass
def rdt_1_0_receive(self):
pass
def rdt_2_1_send(self, msg_S):
send_packet = Packet(self.seq_num, msg_S)
while True:
self.network.udt_send(send_packet.get_byte_S())
self.byte_buffer = ''
while self.byte_buffer == '': # receive a packet
self.byte_buffer = self.network.udt_receive()
length = int(self.byte_buffer[:Packet.length_S_length]) # Extract the length of the packet
if Packet.corrupt(self.byte_buffer[:length]): # if received packet corrupt resend
print('Corrupt ack!')
continue
else:
receive_packet = Packet.from_byte_S(self.byte_buffer[:length])
if receive_packet.msg_S == "NAK":
continue
if receive_packet.msg_S == "ACK":
break
self.byte_buffer = ''
self.seq_num += 1
def rdt_2_1_receive(self):
ret_S = None
self.byte_buffer += self.network.udt_receive()
# keep extracting packets - if reordered, could get more than one
while True:
# check if we have received enough bytes
if len(self.byte_buffer) < Packet.length_S_length:
break
# extract length of packet
length = int(self.byte_buffer[:Packet.length_S_length])
if len(self.byte_buffer) < length:
break
# create packet from buffer content and add to return string
if Packet.corrupt(self.byte_buffer[0:length]):
                print('Corrupt packet received!')
self.network.udt_send(Packet(self.seq_num, 'NAK').get_byte_S())
else:
receive_packet = Packet.from_byte_S(self.byte_buffer[0:length])
if receive_packet.msg_S != 'ACK' and receive_packet.msg_S != 'NAK':
ret_S = receive_packet.msg_S if (ret_S is None) else ret_S + receive_packet.msg_S
self.network.udt_send(Packet(receive_packet.seq_num, 'ACK').get_byte_S())
# print 'Sent ACK'
if self.seq_num == receive_packet.seq_num:
self.seq_num += 1
# remove the packet bytes from the buffer
self.byte_buffer = self.byte_buffer[length:]
return ret_S
def rdt_3_0_send(self, msg_S):
pass
def rdt_3_0_receive(self):
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RDT implementation.')
parser.add_argument('role', help='Role is either client or server.', choices=['client', 'server'])
parser.add_argument('server', help='Server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
rdt = RDT(args.role, args.server, args.port)
if args.role == 'client':
        rdt.rdt_2_1_send('MSG_FROM_CLIENT')
        sleep(2)
        print(rdt.rdt_2_1_receive())
rdt.disconnect()
else:
sleep(1)
        print(rdt.rdt_2_1_receive())
        rdt.rdt_2_1_send('MSG_FROM_SERVER')
rdt.disconnect()
``` |
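To make the framing used by `Packet` concrete, here is a small round-trip sketch of my own (not part of the assignment starter code). The wire format is a 10-character zero-padded length, a 10-character sequence number, a 32-character MD5 hex digest, and then the payload.

```python
# Round-trip sketch of the Packet wire format; assumes these lines run in the
# same module (RDT_2_1.py) so the Packet class above is in scope.
p = Packet(1, 'hello')
wire = p.get_byte_S()
# 10-char length | 10-char seq num | 32-char md5 hex | payload
print(wire[:10], wire[10:20], wire[20:52], wire[52:])
assert not Packet.corrupt(wire)           # an untouched packet passes the check
tampered = wire[:-1] + ('x' if wire[-1] != 'x' else 'y')
assert Packet.corrupt(tampered)           # flipping one payload byte breaks the checksum
print(Packet.from_byte_S(wire).msg_S)     # -> hello
```

This checksum is what drives the stop-and-wait loop above: `rdt_2_1_send` keeps retransmitting until it receives an uncorrupted ACK, and `rdt_2_1_receive` answers corrupt data with a NAK.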
{
"source": "2amitprakash/Python_Codes",
"score": 3
} |
#### File: Python_Codes/Excel/ictc_report_section_i_report.py
```python
from openpyxl import Workbook
from openpyxl import load_workbook
import datetime
import common.connect_soch as conn
import pandas as pd
def fetch_data():
sql = 'Select \
table3."Received_Month", \
table3."Received_Year",\
SUM(table3."Number_of_individuals_received_pre-test_counseling/information")"Number_of_individuals_received_pre-test_counseling/information",\
SUM(table3."Number_of_individuals_receiving_post-test_counseling_and_given_results")"Number_of_individuals_receiving_post-test_counseling_and_given_results",\
SUM(table3."Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling")"Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling",\
SUM(table3."Number_of_individuals_tested_for_HIV")"Number_of_individuals_tested_for_HIV",\
SUM(table3."Number_of_individuals_received_result_within_7_days_of_HIV_Test")"Number_of_individuals_received_result_within_7_days_of_HIV_Test",\
SUM(table3."Number_of_HIV_positive_individuals_having_HIV-I_infection")"Number_of_HIV_positive_individuals_having_HIV-I_infection",\
SUM(table3."Number_of_HIV_positive_individuals_having_HIV-II_infection")"Number_of_HIV_positive_individuals_having_HIV-II_infection",\
SUM(table3."Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections")"Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections",\
SUM(table3."Number_of_individuals_tested_for_HIV_and_found_Negative")"Number_of_individuals_tested_for_HIV_and_found_Negative",\
SUM(table3."Number_of_Self-initiated_Individuals_tested_for_HIV")"Number_of_Self-initiated_Individuals_tested_for_HIV",\
SUM(table3."Number_of_Self-initiated_individuals_diagnosed_HIV_positive")"Number_of_Self-initiated_individuals_diagnosed_HIV_positive",\
SUM(table3."Number_of_provider_initiated_Individuals_tested_for_HIV")"Number_of_provider_initiated_Individuals_tested_for_HIV",\
SUM(table3."Number_of_provider_initiated_individuals_diagnosed_HIV_positive")"Number_of_provider_initiated_individuals_diagnosed_HIV_positive",\
SUM(table3."Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC")"Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC" \
from( \
select \
table2."SACS_ID" ,\
table2."SACS",\
table2."Received_Month",\
table2."Received_Year",\
SUM(table2."Number_of_individuals_received_pre-test_counseling/information")"Number_of_individuals_received_pre-test_counseling/information",\
SUM(table2."Number_of_individuals_receiving_post-test_counseling_and_given_results")"Number_of_individuals_receiving_post-test_counseling_and_given_results",\
SUM(table2."Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling")"Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling",\
SUM(table2."Number_of_individuals_tested_for_HIV")"Number_of_individuals_tested_for_HIV",\
SUM(table2."Number_of_individuals_received_result_within_7_days_of_HIV_Test")"Number_of_individuals_received_result_within_7_days_of_HIV_Test",\
SUM(table2."Number_of_HIV_positive_individuals_having_HIV-I_infection")"Number_of_HIV_positive_individuals_having_HIV-I_infection",\
SUM(table2."Number_of_HIV_positive_individuals_having_HIV-II_infection")"Number_of_HIV_positive_individuals_having_HIV-II_infection",\
SUM(table2."Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections")"Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections",\
SUM(table2."Number_of_individuals_tested_for_HIV_and_found_Negative")"Number_of_individuals_tested_for_HIV_and_found_Negative",\
SUM(table2."Number_of_Self-initiated_Individuals_tested_for_HIV")"Number_of_Self-initiated_Individuals_tested_for_HIV",\
SUM(table2."Number_of_Self-initiated_individuals_diagnosed_HIV_positive")"Number_of_Self-initiated_individuals_diagnosed_HIV_positive",\
SUM(table2."Number_of_provider_initiated_Individuals_tested_for_HIV")"Number_of_provider_initiated_Individuals_tested_for_HIV",\
SUM(table2."Number_of_provider_initiated_individuals_diagnosed_HIV_positive")"Number_of_provider_initiated_individuals_diagnosed_HIV_positive",\
SUM(table2."Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC")"Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC"\
\
from\
(\
Select \
T2.ID,\
T2."SACS",\
T2."ICTC_center",\
T2."SACS_ID", \
CASE WHEN T2."Received_Month" = 1 THEN '"'January'"' \
WHEN T2."Received_Month" = 2 THEN '"'February'"' \
WHEN T2."Received_Month" = 3 THEN '"'March'"' \
WHEN T2."Received_Month" = 4 THEN '"'April'"' \
WHEN T2."Received_Month" = 5 THEN '"'May'"' \
WHEN T2."Received_Month" = 6 THEN '"'June'"' \
WHEN T2."Received_Month" = 7 THEN '"'July'"' \
WHEN T2."Received_Month" = 8 THEN '"'August'"' \
WHEN T2."Received_Month" = 9 THEN '"'September'"' \
WHEN T2."Received_Month" = 10 THEN '"'October'"' \
WHEN T2."Received_Month" = 11 THEN '"'November'"' \
WHEN T2."Received_Month" = 12 THEN '"'December'"' \
END as "Received_Month",\
T2."Received_Year",\
SUM(T2."Number_of_individuals_received_pre-test_counseling/information")"Number_of_individuals_received_pre-test_counseling/information",\
SUM(T2."Number_of_individuals_receiving_post-test_counseling_and_given_results")"Number_of_individuals_receiving_post-test_counseling_and_given_results",\
SUM(T2."Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling")"Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling",\
SUM(T3."Number_of_individuals_tested_for_HIV")"Number_of_individuals_tested_for_HIV",\
SUM(T4."Number_of_individuals_received_result_within_7_days_of_HIV_Test")"Number_of_individuals_received_result_within_7_days_of_HIV_Test",\
SUM(T5."Number_of_HIV_positive_individuals_having_HIV-I_infection")"Number_of_HIV_positive_individuals_having_HIV-I_infection",\
SUM(T5."Number_of_HIV_positive_individuals_having_HIV-II_infection")"Number_of_HIV_positive_individuals_having_HIV-II_infection",\
SUM(T5."Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections")"Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections",\
SUM(T6."Number_of_individuals_tested_for_HIV_and_found_Negative")"Number_of_individuals_tested_for_HIV_and_found_Negative",\
SUM(T7."Number_of_Self-initiated_Individuals_tested_for_HIV")"Number_of_Self-initiated_Individuals_tested_for_HIV",\
SUM(T8."Number_of_Self-initiated_individuals_diagnosed_HIV_positive")"Number_of_Self-initiated_individuals_diagnosed_HIV_positive",\
SUM(T9."Number_of_provider_initiated_Individuals_tested_for_HIV")"Number_of_provider_initiated_Individuals_tested_for_HIV",\
SUM(T10."Number_of_provider_initiated_individuals_diagnosed_HIV_positive")"Number_of_provider_initiated_individuals_diagnosed_HIV_positive",\
SUM(T11."Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC")"Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC"\
from(\
select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
case When iv.BENEFICIARY_STATUS=1 Then \
(cast(count(iben.BENEFICIARY_ID)as numeric)) Else 0 End as "Number_of_individuals_received_pre-test_counseling/information",\
case When iv.BENEFICIARY_STATUS=4 Then \
(cast(count(iben.BENEFICIARY_ID)as numeric)) Else 0 End as "Number_of_individuals_receiving_post-test_counseling_and_given_results",\
case When iv.BENEFICIARY_STATUS=5 Then \
(cast(count(iben.BENEFICIARY_ID)as numeric)) Else 0 End as "Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling",\
\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and \
iv.BENEFICIARY_STATUS in (1,4,5)\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"' \
and f.is_active = '"'true'"' \
and f_sacs.is_active = '"'true'"' \
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"' \
and ft.is_active = '"'true'"' \
group by\
f.id, b.gender,f_sacs.name,f_sacs.id,\
f.name,iv.BENEFICIARY_STATUS,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date))T2\
\
full outer join(\
select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric) as "Number_of_individuals_tested_for_HIV",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and itr.tested_date is not null\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,\
b.gender,f_sacs.name,f_sacs.id,\
f.name,iv.BENEFICIARY_STATUS,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date))T3 on (T2.ID=T3.ID and T2."SACS_ID"=T3."SACS_ID" and T2."Received_Month"=T3."Received_Month" and T2."Received_Year"=T3."Received_Year")\
full outer join \
(select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric)as "Number_of_individuals_received_result_within_7_days_of_HIV_Test",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and itr.tested_date is not null\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and cast((cast(isc.sample_collection_date AS DATE) - cast(itr.report_received_date AS DATE))day as numeric) <=7\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),isc.sample_collection_date,\
itr.report_received_date\
)T4 on (T2.ID=T4.ID and T2."SACS_ID"=T4."SACS_ID" and T2."Received_Month"=T4."Received_Month" and T2."Received_Year"=T4."Received_Year")\
\
full outer join (\
select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
\
Case When itr.hiv_type=1 then (cast(count(iben.BENEFICIARY_ID)as numeric))Else 0 End as "Number_of_HIV_positive_individuals_having_HIV-I_infection",\
Case When itr.hiv_type=2 then (cast(count(iben.BENEFICIARY_ID)as numeric))Else 0 End as "Number_of_HIV_positive_individuals_having_HIV-II_infection",\
Case When itr.hiv_type=3 then (cast(count(iben.BENEFICIARY_ID)as numeric))Else 0 End as "Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections",\
\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and itr.hiv_type in (1,2,3)\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by \
f.id,b.gender,f_sacs.name,f_sacs.id,\
f.name,iv.BENEFICIARY_STATUS,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T5 on (T2.ID=T5.ID and T2."SACS_ID"=T5."SACS_ID" and T2."Received_Month"=T5."Received_Month" and T2."Received_Year"=T5."Received_Year")\
\
full outer join \
(select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
Case When itr.hiv_status=1 then (cast(count(iben.BENEFICIARY_ID)as numeric))Else 0 End as "Number_of_individuals_tested_for_HIV_and_found_Negative",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and itr.hiv_status in (1)\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,itr.hiv_status,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T6 on (T2.ID=T6.ID and T2."SACS_ID"=T6."SACS_ID" and T2."Received_Month"=T6."Received_Month" and T2."Received_Year"=T6."Received_Year")\
\
full outer join (\
select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric) as "Number_of_Self-initiated_Individuals_tested_for_HIV",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and iben.referred_by is null\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"' \
group by\
f.id,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T7 on (T2.ID=T7.ID and T2."SACS_ID"=T7."SACS_ID" and T2."Received_Month"=T7."Received_Month" and T2."Received_Year"=T7."Received_Year")\
\
full outer join (select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric) as "Number_of_Self-initiated_individuals_diagnosed_HIV_positive",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and iben.referred_by is null and itr.hiv_status in (1)\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T8 on (T2.ID=T8.ID and T2."SACS_ID"=T8."SACS_ID" and T2."Received_Month"=T8."Received_Month" and T2."Received_Year"=T8."Received_Year")\
\
full outer join \
(select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric) as "Number_of_provider_initiated_Individuals_tested_for_HIV",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and iben.referred_by is not null\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T9 on (T2.ID=T9.ID and T2."SACS_ID"=T9."SACS_ID" and T2."Received_Month"=T9."Received_Month" and T2."Received_Year"=T9."Received_Year")\
full outer join \
(select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric) as "Number_of_provider_initiated_individuals_diagnosed_HIV_positive",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (11,13) and f_sacs.facility_type_id in (2) and iben.referred_by is not null and itr.hiv_status in (1)\
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T10 on (T2.ID=T10.ID and T2."SACS_ID"=T10."SACS_ID" and T2."Received_Month"=T10."Received_Month" and T2."Received_Year"=T10."Received_Year")\
full outer join \
(select \
f.ID, \
f_sacs.name as "SACS",\
f.name as "ICTC_center",\
f_sacs.id as "SACS_ID",\
cast(count(iben.BENEFICIARY_ID)as numeric) as "Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC",\
extract(month from iben.registration_date) as "Received_Month",\
extract(year from iben.registration_date) as "Received_Year"\
FROM ICTC_BENEFICIARY as iben \
JOIN BENEFICIARY as b on (iben.BENEFICIARY_ID = b.ID)\
JOIN ICTC_SAMPLE_COLLECTION as isc on (iben.ID = isc.ICTC_BENEFICIARY_ID)\
JOIN FACILITY as f on (iben.FACILITY_ID = f.ID)\
JOIN FACILITY as f_sacs on (f_sacs.id=f.sacs_id)\
JOIN ICTC_VISIT as iv on (isc.VISIT_ID = iv.ID)\
JOIN FACILITY_TYPE as ft on (f.FACILITY_TYPE_ID = ft.ID)\
JOIN ICTC_TEST_RESULT as itr on (iv.ID = itr.VISIT_ID)\
where f.facility_type_id in (10,11,13) and f_sacs.facility_type_id in (2) and itr.hiv_status in (3) \
and iv.IS_PREGNANT = '"'true'"'\
and b.gender in ('"'female'"')\
and iben.is_active = '"'true'"'\
and b.is_active = '"'true'"' \
and isc.is_active = '"'true'"'\
and f.is_active = '"'true'"'\
and f_sacs.is_active = '"'true'"'\
and iv.is_active = '"'true'"' \
and itr.is_active = '"'true'"'\
and ft.is_active = '"'true'"'\
group by\
f.id,b.gender,\
f.name,iv.BENEFICIARY_STATUS,f_sacs.name,f_sacs.id,\
extract(month from iben.registration_date),\
extract(year from iben.registration_date),itr.hiv_type)T11 \
on (T2.ID=T11.ID and T2."SACS_ID"=T11."SACS_ID" and T2."Received_Month"=T11."Received_Month" and T2."Received_Year"=T11."Received_Year")\
group by \
T2.ID,\
T2."SACS",\
T2."ICTC_center",\
T2."SACS_ID",\
T2."Received_Month",\
T2."Received_Year"\
\
)table2\
group by\
table2."SACS_ID" ,\
table2."SACS",\
table2."Received_Month", \
table2."Received_Year" \
)table3\
group by \
\
table3."Received_Month", \
table3."Received_Year"'
#Execute query
xl_df = pd.read_sql(sql, conn.connect())
return xl_df
def create_report():
#Get dataframe
df = fetch_data()
# Start by opening the spreadsheet and selecting the main sheet
workbook = load_workbook(filename='templates\\ictc_report_section_i_template.xlsx')
sheet = workbook.active
#Check if DF is empty
if df.empty:
print('DataFrame is empty!')
else:# Write what you want into a specific cell
print(df)
sheet["H9"] = df['Number_of_individuals_received_pre-test_counseling/information']
sheet["H10"] = df['Number_of_individuals_tested_for_HIV']
sheet["H11"] = df['Number_of_individuals_receiving_post-test_counseling_and_given_results']
sheet["H12"] = df['Number_of_individuals_received_result_within_7_days_of_HIV_Test']
sheet["H13"] = 0 # once mapping available, update it.
sheet["H14"] = df['Number_of_HIV_positive_individuals_having_HIV-I_infection']
sheet["H15"] = df['Number_of_HIV_positive_individuals_having_HIV-II_infection']
sheet["H16"] = df['Number_of_HIV_positive_individuals_having_both_HIV-I_&_II_infections']
sheet["H17"] = df['Number_of_individuals_tested_for_HIV_and_found_Negative']
sheet["H18"] = df['Number_of_individuals_with_High_Risk_Behavior_received_follow-up_counseling']
sheet["H19"] = df['Number_of_Self-initiated_Individuals_tested_for_HIV']
sheet["H20"] = df['Number_of_Self-initiated_individuals_diagnosed_HIV_positive']
sheet["H21"] = df['Number_of_provider_initiated_Individuals_tested_for_HIV']
sheet["H22"] = df['Number_of_provider_initiated_individuals_diagnosed_HIV_positive']
sheet["H123"] = df['Total_number_of_individuals_turned_Indeterminate_for_HIV_at_SA_ICTC']
# Save the spreadsheet
now = datetime.datetime.now()
pref = now.strftime('%Y_%b_')
workbook.save(filename='reports\\ictc_report_' + pref + '_section_i national pregnant.xlsx')
print ('*** Excel report created.')
# Test the Function
if __name__=="__main__":
create_report()
```
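One detail worth noting in `create_report`: `fetch_data` returns a DataFrame grouped by received month and year, so each `df['...']` expression is a pandas Series, while an openpyxl cell expects a single scalar. A hedged sketch of the usual pattern is below; whether to sum across months or pick a single row is an assumption that depends on the reporting period wanted.

```python
# Sketch only: reduce a DataFrame column to one scalar before writing it to a cell.
# The aggregation choice (sum vs. a single month's row) is an assumption.
value = df['Number_of_individuals_tested_for_HIV'].sum()       # total over all rows
# value = df['Number_of_individuals_tested_for_HIV'].iloc[0]   # or just the first row
sheet["H10"] = int(value)   # int() avoids writing a numpy scalar into the cell
```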
#### File: FileCompare/Common/readCompareConfig.py
```python
def readConfig(filename):
    configlist = []  # the main list with all config elements
with open(filename) as f:
for line in f:
            if line.strip() and not line.startswith("#"):
                parts = line.strip('\n').split('=')
                # a two-element list (left path, right path) for each entry in the config list
                configlist.append([parts[0].split('.'), parts[1].split('.')])
return configlist
#End of Function
#Test the function
if __name__=="__main__":
config = "config.txt"
print (readConfig(config))
```
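As a concrete illustration of what `readConfig` produces (the dotted field names below are made up): each non-comment line maps a path in one file to a path in the other, and becomes a pair of path lists.

```python
# Hypothetical config line and the structure readConfig builds from it.
line = "order.customer.id=client.cust_id"
left, right = line.strip('\n').split('=')
entry = [left.split('.'), right.split('.')]
print(entry)  # [['order', 'customer', 'id'], ['client', 'cust_id']]
```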
#### File: Python_Codes/FileCompare/compare_file.py
```python
import Common.readCompareConfig as rc
import Json.json_file_processor as jfp
#Compare the elements - list or dictionary or anything else
def compare_data(source_data_a,source_data_b):
def compare(data_a,data_b):
# type: list
if (isinstance(data_a, list)):
#print("Comparing lists: {a} and {b}".format(a=data_a, b=data_b))
# is [data_b] a list and of same length as [data_a]?
if (
not (isinstance(data_b, list)) or
(len(data_a) != len(data_b))
):
return False
else:
# Sort the lists
#data_a.sort()
#data_b.sort()
# iterate over list items
for list_index,list_item in enumerate(data_a):
# compare [data_a] list item against [data_b] at index
if (not compare(list_item,data_b[list_index])):
return False
# list identical
return True
# type: dictionary
elif (type(data_a) is dict):
#print("Comparing dicts: {a} and {b}".format(a=data_a, b=data_b))
# is [data_b] a dictionary?
if (type(data_b) != dict):
return False
# iterate over dictionary keys
for dict_key,dict_value in data_a.items():
# key exists in [data_b] dictionary, and same value?
if (
(dict_key not in data_b) or
(not compare(dict_value,data_b[dict_key]))
):
return False
# dictionary identical
return True
# simple value - compare both value and type for equality
else:
#print("Comparing values: {a} and {b}".format(a=data_a, b=data_b))
return data_a == data_b
# compare b to a in recursion unless meet the base condition
return compare(source_data_b,source_data_a)
#End of compare
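# Illustrative behaviour (follows directly from the recursion above):
#   compare_data({"a": [1, 2]}, {"a": [1, 2]})  -> True
#   compare_data([1, 2], [2, 1])                -> False  (list order matters)
#   compare_data({"a": 1, "b": 2}, {"a": 1})    -> True   (the check is one-way:
#       every key of the second argument must appear in the first)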
# Compare the data elements based on configuration and file type
def compareConfigBased(elemList, file1_list,file1_list_count,file2_list,file2_list_count):
if (elemList == []):
return False
value1 = jfp.getValueFromJsonFile(elemList[0],file1_list,file1_list_count)
value2 = jfp.getValueFromJsonFile(elemList[1],file2_list,file2_list_count)
#print (value1, value2)
return compare_data(value1,value2)
#End of Function
def run_compare():
file1 = "Json/a.json"
file2 = "Json/b.json"
cfile = "Json/json_comp_config.txt"
#Get first file loaded
flat_json_1, lCount_1 = jfp.flatten_json(jfp.loadJson(file1))
#print ("Flattened JSON ----",flat_json_1)
#print ("List of the counts ----", lCount_1)
#Get second file loaded
flat_json_2, lCount_2 = jfp.flatten_json(jfp.loadJson(file2))
#Get configuration fle loaded
lst = rc.readConfig(cfile)
#Compare
for m in range(0,len(lst)):
if (compareConfigBased (lst[m],flat_json_1, lCount_1,flat_json_2, lCount_2)):
print ("Good: Values for {l} and {r} matched".format(l=lst[m][0],r=lst[m][1]))
else:
print ("Error: Values for {l} and {r} not matched".format(l=lst[m][0],r=lst[m][1]))
# Test the Function
if __name__=="__main__":
run_compare()
```
#### File: Python_Codes/Grokking_Algo/quicksort.py
```python
import random
def quicksort(items):
    if len(items) < 2:
        return items
    else:
        pi = random.randint(0, len(items) - 1)
        #pi = 0
        print ("The list is {l} and random index is {i}".format(l=items, i=pi))
        pivot = items.pop(pi)
        less = [i for i in items if i <= pivot]
        more = [i for i in items if i > pivot]
        return quicksort(less) + [pivot] + quicksort(more)
#End of function
l=[2,3,6,7,4,6,9,11,-1,5]
print ("The sorted list is - ",quicksort(l))
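# Expected result of the demo above: the "The list is ..." trace varies with the
# random pivot, but the final line is deterministic:
#   The sorted list is -  [-1, 2, 3, 4, 5, 6, 6, 7, 9, 11]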
``` |
{
"source": "2anirban/LSTM-Stock-Predictor",
"score": 2
} |
#### File: site-packages/structlog/dev.py
```python
from __future__ import absolute_import, division, print_function
from six import StringIO
try:
import colorama
except ImportError:
colorama = None
__all__ = [
"ConsoleRenderer",
]
_MISSING = (
"{who} requires the {package} package installed. "
"If you want to use the helpers from structlog.dev, it is strongly "
"recommended to install structlog using `pip install structlog[dev]`."
)
_EVENT_WIDTH = 30 # pad the event name to so many characters
def _pad(s, l):
"""
Pads *s* to length *l*.
"""
missing = l - len(s)
return s + " " * (missing if missing > 0 else 0)
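    # e.g. _pad("info", 8) returns "info" followed by four spaces; strings
    # longer than *l* are returned unchanged (it only right-pads, never truncates).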
if colorama is not None:
RESET_ALL = colorama.Style.RESET_ALL
BRIGHT = colorama.Style.BRIGHT
DIM = colorama.Style.DIM
RED = colorama.Fore.RED
BLUE = colorama.Fore.BLUE
CYAN = colorama.Fore.CYAN
MAGENTA = colorama.Fore.MAGENTA
YELLOW = colorama.Fore.YELLOW
GREEN = colorama.Fore.GREEN
class ConsoleRenderer(object):
"""
Render `event_dict` nicely aligned, in colors, and ordered.
:param int pad_event: Pad the event to this many characters.
Requires the colorama_ package.
.. _colorama: https://pypi.python.org/pypi/colorama/
.. versionadded:: 16.0.0
"""
def __init__(self, pad_event=_EVENT_WIDTH):
if colorama is None:
raise SystemError(
_MISSING.format(
who=self.__class__.__name__,
package="colorama"
)
)
colorama.init()
self._pad_event = pad_event
self._level_to_color = {
"critical": RED,
"exception": RED,
"error": RED,
"warn": YELLOW,
"warning": YELLOW,
"info": GREEN,
"debug": GREEN,
"notset": colorama.Back.RED,
}
for key in self._level_to_color.keys():
self._level_to_color[key] += BRIGHT
self._longest_level = len(max(
self._level_to_color.keys(),
key=lambda e: len(e)
))
def __call__(self, _, __, event_dict):
sio = StringIO()
ts = event_dict.pop("timestamp", None)
if ts is not None:
sio.write(
# can be a number if timestamp is UNIXy
DIM + str(ts) + RESET_ALL + " "
)
level = event_dict.pop("level", None)
if level is not None:
sio.write(
"[" + self._level_to_color[level] +
_pad(level, self._longest_level) +
RESET_ALL + "] "
)
sio.write(
BRIGHT +
_pad(event_dict.pop("event"), self._pad_event) +
RESET_ALL + " "
)
logger_name = event_dict.pop("logger", None)
if logger_name is not None:
sio.write(
"[" + BLUE + BRIGHT +
logger_name + RESET_ALL +
"] "
)
stack = event_dict.pop("stack", None)
exc = event_dict.pop("exception", None)
sio.write(
" ".join(
CYAN + key + RESET_ALL +
"=" +
MAGENTA + repr(event_dict[key]) +
RESET_ALL
for key in sorted(event_dict.keys())
)
)
if stack is not None:
sio.write("\n" + stack)
if exc is not None:
sio.write("\n\n" + "=" * 79 + "\n")
if exc is not None:
sio.write("\n" + exc)
return sio.getvalue()
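# Illustrative usage (a sketch, not part of the original module; requires the
# colorama extra): ConsoleRenderer is meant to be the final processor in a
# structlog processor chain, e.g.:
#
#   import structlog
#   structlog.configure(processors=[
#       structlog.stdlib.add_log_level,
#       structlog.processors.TimeStamper(fmt="iso"),
#       ConsoleRenderer(pad_event=30),
#   ])
#   structlog.get_logger().info("greeted", name="world")
#
# which renders roughly: "<timestamp> [info     ] greeted ...  name='world'"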
``` |
{
"source": "2ashish/bidding-game",
"score": 3
} |
#### File: 2ashish/bidding-game/source.py
```python
import random
def create_sample():
nn = []
for i in range(35):
nn.append(random.random()/10)
return nn
def first_pop(pop_size):
pop = []
for i in range(pop_size):
pop.append(create_sample())
return pop
def evalnn(nn,coin1,coin2,pos1,pos2,draw):
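    # Layout of the 35-element weight vector (as used below): nn[0:25] are the
    # input-to-hidden weights for the 5 scaled inputs (own coins, own position,
    # opponent coins, opponent position, draw flag) feeding 5 hidden units;
    # nn[25:30] are the hidden-unit accumulators (note: updated in place on
    # every call); nn[30:35] are the hidden-to-output weights. The returned bid
    # is clamped between 0 and the player's current coin count.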
coin1/=100
coin2/=100
pos1/=10
pos2/=10
nn[25]+= coin1*nn[0] + pos1*nn[5] + coin2*nn[10] + pos2*nn[15] + draw*nn[20]
nn[26]+= coin1*nn[1] + pos1*nn[6] + coin2*nn[11] + pos2*nn[16] + draw*nn[21]
nn[27]+= coin1*nn[2] + pos1*nn[7] + coin2*nn[12] + pos2*nn[17] + draw*nn[22]
nn[28]+= coin1*nn[3] + pos1*nn[8] + coin2*nn[13] + pos2*nn[18] + draw*nn[23]
nn[29]+= coin1*nn[4] + pos1*nn[9] + coin2*nn[14] + pos2*nn[19] + draw*nn[24]
ans = nn[25]*nn[30] + nn[26]*nn[31] + nn[27]*nn[32] + nn[28]*nn[33] +nn[29]*nn[34]
ans = int(ans*coin1*100)
if(ans<0):
ans = 0
if(ans>coin1*100):
ans = coin1*100
return ans
def disp(pos,coin1,coin2,bid1,bid2,draw):
seq =""
for i in range(11):
if i==pos:
seq+="x"
else:
seq+="o"
print(seq,coin1,coin2,bid1,bid2,draw)
input()
def play(nn1,nn2,draw):
pos =5
coin1 = 100
coin2 = 100
fit1 = 0
fit2 = 0
#disp(pos,coin1,coin2,0,0,draw)
move =0
while pos!=0 and pos!=10 and move<200:
move+=1
bid1 = evalnn(nn1,coin1,coin2,pos,10-pos,draw)
bid2 = evalnn(nn2,coin2,coin1,10-pos,pos,-1*draw)
if(draw ==1):
if(bid1>bid2):
pos-=1
coin1-=bid1
fit1-=bid1-bid2
if(bid2>bid1):
pos+=1
coin2-=bid2
fit2-=bid2-bid1
if(bid1==bid2):
pos-=1
coin1-=bid1
draw*=-1
else:
if(bid1>bid2):
pos-=1
coin1-=bid1
fit1-=bid1-bid2
if(bid2>bid1):
pos+=1
coin2-=bid2
fit2-=bid2-bid1
if(bid1==bid2):
pos+=1
coin2-=bid2
draw*=-1
#print(fit1,fit2)
#disp(pos,coin1,coin2,bid1,bid2,draw)
if coin1==0:
#print("player 2 wins")
fit1-=200
break
if coin2==0:
#print("player 1 wins")
            fit2 -= 200
break
if pos==0:
fit1+=100
fit2-=100
if pos==10:
fit1-=100
fit2+=100
if move==200:
fit1-=100
fit2-=100
#print(fit1,fit2)
return fit1
def pop_fitness(new_pop,prev_pop):
fit_pop = {}
#print(new_pop[0])
for nn1 in range(len(new_pop)):
fit = 0
for nn2 in range(len(prev_pop)):
fit+=play(new_pop[nn1],prev_pop[nn2],1)
#print(fit)
fit_pop[str(nn1)] = fit
return sorted(fit_pop.items(), key = lambda t: t[1],reverse = True )
# def select_pop(fit_pop,pop,pop_size):
# for i
nn1 = create_sample()
nn2 = create_sample()
#fit = play(nn1,nn2,1)
#print(fit)
pop_size = 50
max_gen = 1
new_pop = first_pop(pop_size)
prev_pop = first_pop(2 * pop_size // 5)
for gen in range(max_gen):
fit_pop = pop_fitness(new_pop,prev_pop)
for i in range(50):
print(fit_pop[i][1])
``` |
{
"source": "2ashish/NLP-Answering-Reading-Comprehension",
"score": 3
} |
#### File: 2ashish/NLP-Answering-Reading-Comprehension/fastqa.py
```python
from __future__ import print_function
from __future__ import division
import numpy as np
from keras import backend as K
import keras
from keras.models import Model
from keras.layers import Input, Dense, RepeatVector, Masking, Dropout, Flatten, Activation, Reshape, Lambda, Permute, merge, multiply, concatenate
from keras.layers.merge import Concatenate
from keras.layers.wrappers import Bidirectional, TimeDistributed
from keras.layers.recurrent import GRU, LSTM
from keras.layers.pooling import GlobalMaxPooling1D
class FastQA(Model):
def __init__(self, inputs=None, outputs=None,
N=None, M=None, unroll=False,
hdim=300, word2vec_dim=300, dropout_rate=0.2,
**kwargs):
# Load model from config
if inputs is not None and outputs is not None:
super(FastQA, self).__init__(inputs=inputs,
outputs=outputs,
**kwargs)
return
'''Dimensions'''
B = None
H = hdim
W = word2vec_dim
'''Inputs'''
P = Input(shape=(N, W), name='P')
Q = Input(shape=(M, W), name='Q')
'''Word in question binary'''
def wiq_feature(P, Q):
'''
Binary feature mentioned in the paper.
For each word in passage returns if that word is present in question.
'''
slice = []
for i in range(N):
word_sim = K.tf.equal(W, K.tf.reduce_sum(
K.tf.cast(K.tf.equal(K.tf.expand_dims(P[:, i, :], 1), Q), K.tf.int32), axis=2))
question_sim = K.tf.equal(M, K.tf.reduce_sum(K.tf.cast(word_sim, K.tf.int32), axis=1))
slice.append(K.tf.cast(question_sim, K.tf.float32))
wiqout = K.tf.expand_dims(K.tf.stack(slice, axis=1), 2)
return wiqout
wiq_p = Lambda(lambda arg: wiq_feature(arg[0], arg[1]))([P, Q])
wiq_q = Lambda(lambda q: K.tf.ones([K.tf.shape(Q)[0], M, 1], dtype=K.tf.float32))(Q)
passage_input = P
question_input = Q
# passage_input = Lambda(lambda arg: concatenate([arg[0], arg[1]], axis=2))([P, wiq_p])
# question_input = Lambda(lambda arg: concatenate([arg[0], arg[1]], axis=2))([Q, wiq_q])
'''Encoding'''
encoder = Bidirectional(LSTM(units=W,
return_sequences=True,
dropout=dropout_rate,
unroll=unroll))
passage_encoding = passage_input
passage_encoding = encoder(passage_encoding)
passage_encoding = TimeDistributed(
Dense(W,
use_bias=False,
trainable=True,
weights=np.concatenate((np.eye(W), np.eye(W)), axis=1)))(passage_encoding)
question_encoding = question_input
question_encoding = encoder(question_encoding)
question_encoding = TimeDistributed(
Dense(W,
use_bias=False,
trainable=True,
weights=np.concatenate((np.eye(W), np.eye(W)), axis=1)))(question_encoding)
'''Attention over question'''
# compute the importance of each step
question_attention_vector = TimeDistributed(Dense(1))(question_encoding)
question_attention_vector = Lambda(lambda q: keras.activations.softmax(q, axis=1))(question_attention_vector)
# apply the attention
question_attention_vector = Lambda(lambda q: q[0] * q[1])([question_encoding, question_attention_vector])
question_attention_vector = Lambda(lambda q: K.sum(q, axis=1))(question_attention_vector)
question_attention_vector = RepeatVector(N)(question_attention_vector)
'''Answer span prediction'''
# Answer start prediction
answer_start = Lambda(lambda arg:
concatenate([arg[0], arg[1], arg[2]]))([
passage_encoding,
question_attention_vector,
multiply([passage_encoding, question_attention_vector])])
answer_start = TimeDistributed(Dense(W, activation='relu'))(answer_start)
answer_start = TimeDistributed(Dense(1))(answer_start)
answer_start = Flatten()(answer_start)
answer_start = Activation('softmax')(answer_start)
# Answer end prediction depends on the start prediction
def s_answer_feature(x):
maxind = K.argmax(
x,
axis=1,
)
return maxind
x = Lambda(lambda x: K.tf.cast(s_answer_feature(x), dtype=K.tf.int32))(answer_start)
start_feature = Lambda(lambda arg: K.tf.gather_nd(arg[0], K.tf.stack(
[K.tf.range(K.tf.shape(arg[1])[0]), K.tf.cast(arg[1], K.tf.int32)], axis=1)))([passage_encoding, x])
start_feature = RepeatVector(N)(start_feature)
# Answer end prediction
answer_end = Lambda(lambda arg: concatenate([
arg[0],
arg[1],
arg[2],
multiply([arg[0], arg[1]]),
multiply([arg[0], arg[2]])
]))([passage_encoding, question_attention_vector, start_feature])
answer_end = TimeDistributed(Dense(W, activation='relu'))(answer_end)
answer_end = TimeDistributed(Dense(1))(answer_end)
answer_end = Flatten()(answer_end)
answer_end = Activation('softmax')(answer_end)
input_placeholders = [P, Q]
inputs = input_placeholders
outputs = [answer_start, answer_end]
super(FastQA, self).__init__(inputs=inputs,
outputs=outputs,
**kwargs)
if __name__ == "__main__":
model = FastQA(hdim=50, N=50, M=30, dropout_rate=0.2)
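    # Illustrative note (not from the original): with these arguments the model
    # expects a passage tensor P of shape (batch, 50, 300) and a question tensor
    # Q of shape (batch, 30, 300), since word2vec_dim defaults to 300, and it
    # outputs two length-50 softmax vectors (answer-start and answer-end
    # probabilities over passage positions). model.summary() can verify this.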
``` |
{
"source": "2AUK/pyrism",
"score": 3
} |
#### File: pyrism/Closures/closure_dispatcher.py
```python
import numpy as np
from .closure_routines import *
class Closure(object):
closure_dispatcher = {
"HNC": HyperNetted_Chain,
"KH": KovalenkoHirata,
"PSE-1": PSE_1,
"PSE-2": PSE_2,
"PSE-3": PSE_3,
"PY": PercusYevick,
}
def __init__(self, clos):
self.closure = clos
def get_closure(self):
return self.closure_dispatcher[self.closure]
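    # Illustrative usage: Closure("KH").get_closure() returns the KovalenkoHirata
    # routine imported from closure_routines; an unknown closure name raises KeyError.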
```
#### File: pyrism/IntegralEquations/DRISM.py
```python
import numpy as np
from Core import RISM_Obj
from dataclasses import dataclass, field
import Util
from scipy.special import spherical_jn
@dataclass
class DRISM(object):
data_vv: RISM_Obj
diel: float
adbcor: float
data_uv: RISM_Obj = None
chi: np.ndarray = field(init=False)
h_c0: float = field(init=False)
y: float = field(init=False)
def compute_vv(self):
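        # For every k-point this solves the solvent-solvent (DRISM-corrected)
        # RISM equation as written below:
        #     h = (I - w_bar c p)^(-1) (w_bar c w_bar) + chi,   w_bar = w + p chi
        # and then transforms h - c back to r-space to update t.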
I = np.eye(self.data_vv.ns1, M=self.data_vv.ns2, dtype=np.float64)
ck = np.zeros((self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.float64)
w_bar = np.zeros((self.data_vv.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.float64)
k = self.data_vv.grid.ki
r = self.data_vv.grid.ri
#print(self.data_vv.h)
for i, j in np.ndindex(self.data_vv.ns1, self.data_vv.ns2):
ck[:, i, j] = self.data_vv.grid.dht(self.data_vv.c[:, i, j])
ck[:, i, j] -= self.data_vv.B * self.data_vv.uk_lr[:, i, j]
for i in range(self.data_vv.grid.npts):
chi = self.chi
w_bar[i] = (self.data_vv.w[i] + self.data_vv.p @ chi[i])
iwcp = np.linalg.inv(I - w_bar[i] @ ck[i] @ self.data_vv.p)
wcw = (w_bar[i] @ ck[i] @ w_bar[i])
self.data_vv.h[i] = (iwcp @ wcw) + (chi[i])
for i, j in np.ndindex(self.data_vv.ns1, self.data_vv.ns2):
self.data_vv.t[:, i, j] = self.data_vv.grid.idht(self.data_vv.h[:, i, j] - ck[:, i, j]) - (
self.data_vv.B * self.data_vv.ur_lr[:, i, j])
#print(self.data_vv.h)
def compute_uv(self):
if self.data_uv is not None:
I = np.eye(self.data_uv.ns1, M=self.data_uv.ns2)
ck_uv = np.zeros((self.data_uv.npts, self.data_uv.ns1, self.data_uv.ns2), dtype=np.float64)
for i, j in np.ndindex(self.data_uv.ns1, self.data_uv.ns2):
ck_uv[:, i, j] = self.data_uv.grid.dht(self.data_uv.c[:, i, j])
ck_uv[:, i, j] -= self.data_uv.B * self.data_uv.uk_lr[:, i, j]
for i in range(self.data_uv.grid.npts):
self.data_uv.h[i] = (self.data_uv.w[i] @ ck_uv[i]) @ (self.data_vv.w[i] + self.data_vv.p @ self.data_vv.h[i])
for i, j in np.ndindex(self.data_uv.ns1, self.data_uv.ns2):
self.data_uv.t[:, i, j] = self.data_uv.grid.idht(self.data_uv.h[:, i, j] - ck_uv[:, i, j]) - (
self.data_uv.B * self.data_uv.ur_lr[:, i, j])
else:
raise RuntimeError("uv dataclass not defined")
def calculate_DRISM_params(self):
total_density = 0
Util.align_dipole(self.data_vv)
dm, _ = Util.dipole_moment(self.data_vv)
for isp in self.data_vv.species:
total_density += isp.dens
dmdensity = total_density * dm * dm
ptxv = self.data_vv.species[0].dens / total_density
self.y = 4.0 * np.pi * dmdensity / 9.0
self.h_c0 = (((self.diel - 1.0) / self.y) - 3.0) / (total_density * ptxv)
def D_matrix(self):
        d0x = np.zeros((self.data_vv.ns1), dtype=np.float64)
        d0y = np.zeros((self.data_vv.ns1), dtype=np.float64)
        d1z = np.zeros((self.data_vv.ns1), dtype=np.float64)
for ki, k in enumerate(self.data_vv.grid.ki):
hck = self.h_c0 * np.exp(-np.power((self.adbcor * k / 2.0), 2))
i = -1
for isp in self.data_vv.species:
for iat in isp.atom_sites:
i += 1
k_coord = k*iat.coords
if k_coord[0] == 0.0:
d0x[i] = 1.0
else:
d0x[i] = Util.j0(k_coord[0])
if k_coord[1] == 0.0:
d0y[i] = 1.0
else:
d0y[i] = Util.j0(k_coord[1])
if k_coord[2] == 0.0:
d1z[i] = 0.0
else:
d1z[i] = Util.j1(k_coord[2])
for i, j in np.ndindex((self.data_vv.ns1, self.data_vv.ns2)):
self.chi[ki, i, j] = d0x[i] * d0y[i] * d1z[i] * hck * d0x[j] * d0y[j] * d1z[j]
def __post_init__(self):
self.calculate_DRISM_params()
        self.chi = np.zeros((self.data_vv.grid.npts, self.data_vv.ns1, self.data_vv.ns2), dtype=np.float64)
self.D_matrix()
def vv_impl():
pass
def uv_impl():
pass
```
#### File: pyrism/IntegralEquations/XRISM_UV.py
```python
import numpy as np
from Core import RISM_Obj
def XRISM_UV(data_vv, data_uv):
I = np.eye(data_uv.ns1, M=data_uv.ns2)
ck_uv = np.zeros((data_uv.npts, data_uv.ns1, data_uv.ns2), dtype=np.float64)
for i, j in np.ndindex(data_uv.ns1, data_uv.ns2):
ck_uv[:, i, j] = data_uv.grid.dht(data_uv.c[:, i, j])
ck_uv[:, i, j] -= data_uv.B * data_uv.uk_lr[:, i, j]
for i in range(data_uv.grid.npts):
data_uv.h[i] = (data_uv.w[i] @ ck_uv[i]) @ (data_vv.w[i] + data_vv.p @ data_vv.h[i])
for i, j in np.ndindex(data_uv.ns1, data_uv.ns2):
data_uv.t[:, i, j] = data_uv.grid.idht(data_uv.h[:, i, j] - ck_uv[:, i, j]) - (
data_uv.B * data_uv.ur_lr[:, i, j])
```
#### File: pyrism/Solvers/Ng.py
```python
import numpy as np
from Core import RISM_Obj
from .Solver_object import *
from dataclasses import dataclass, field
import pdb
@dataclass
class NgSolver(SolverObject):
fr: list = field(init=False, default_factory=list)
gr: list = field(init=False, default_factory=list)
def step_Picard(self, curr, prev):
self.fr.append(prev)
self.gr.append(curr)
return prev + self.damp_picard * (curr - prev)
def step_Ng(self, curr, prev, A, b):
vecdr = np.asarray(self.gr) - np.asarray(self.fr)
dn = vecdr[-1].flatten()
d01 = (vecdr[-1] - vecdr[-2]).flatten()
d02 = (vecdr[-1] - vecdr[-3]).flatten()
A[0, 0] = np.inner(d01, d01)
A[0, 1] = np.inner(d01, d02)
A[1, 0] = np.inner(d01, d02)
A[1, 1] = np.inner(d02, d02)
b[0] = np.inner(dn, d01)
b[1] = np.inner(dn, d02)
c = np.linalg.solve(A, b)
c_next = (
(1 - c[0] - c[1]) * self.gr[-1] + c[0] * self.gr[-2] + c[1] * self.gr[-3]
)
self.fr.append(prev)
self.gr.append(curr)
self.gr.pop(0)
self.fr.pop(0)
return c_next
def solve(self, RISM, Closure, lam):
i: int = 0
A = np.zeros((2, 2), dtype=np.float64)
b = np.zeros(2, dtype=np.float64)
print("\nSolving solvent-solvent RISM equation...\n")
while i < self.max_iter:
#self.epilogue(i, lam)
c_prev = self.data_vv.c
RISM()
c_A = Closure(self.data_vv)
if i < 3:
c_next = self.step_Picard(c_A, c_prev)
else:
c_next = self.step_Ng(c_A, c_prev, A, b)
if self.converged(c_next, c_prev):
self.epilogue(i, lam)
break
i += 1
if i == self.max_iter:
print("Max iteration reached!")
self.epilogue(i, lam)
break
self.data_vv.c = c_next
def solve_uv(self, RISM, Closure, lam):
i: int = 0
A = np.zeros((2, 2), dtype=np.float64)
b = np.zeros(2, dtype=np.float64)
print("\nSolving solute-solvent RISM equation...\n")
while i < self.max_iter:
c_prev = self.data_uv.c
RISM()
c_A = Closure(self.data_uv)
if i < 3:
c_next = self.step_Picard(c_A, c_prev)
else:
c_next = self.step_Ng(c_A, c_prev, A, b)
if self.converged(c_next, c_prev):
self.epilogue(i, lam)
break
i += 1
if i == self.max_iter:
print("Max iteration reached!")
self.epilogue(i, lam)
break
self.data_uv.c = c_next
```
#### File: pyrism/tests/test_pyrism.py
```python
import pyrism
import pytest
import sys
def test_pyrism_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "pyrism" in sys.modules
``` |
{
"source": "2AUK/SFED",
"score": 3
} |
#### File: 2AUK/SFED/sfed.py
```python
from gridData import Grid, OpenDX
from SFED_routines import *
import numpy as np
import sys
import argparse
import textwrap
from gridcollector import GridCollector
parser = argparse.ArgumentParser(epilog=textwrap.dedent('''\
The .dx files in your input directory need to be tagged with H, C and G
for the total correlation function, direct correlation function and
pair distribution function respectively.'''))
parser.add_argument("-d", "--directory", help=" Directory to be scanned containing dx files", required=True)
parser.add_argument("-i", "--input", help="Name of input molecule", required=True)
parser.add_argument("-c", "--closure", help="Closure for SFE functional [KH, HNC or GF]", required=True)
parser.add_argument("-o", "--output", help = "Output file name", required=True)
parser.add_argument("-T", "--temperature", help="Temperature of system (default = 300)", type=float, nargs="?", default=300)
parser.add_argument("-p", "--density", help="Density of system (default = 0.03342285869 [for water])", type=float, nargs="?", default=3.3422858685000001E-02)
#parser.add_argument("-t", "--tags", help="Suffix tags for scanning the correct .dx files (default = [\"H\", \"C\", \"G\"])", nargs="+", default=["H", "C", "G"])
parser.add_argument("-n", "--term", help="The values for n in the PSE-n closure", required=False)
args = parser.parse_args()
def epilogue(output_sfed, sample_grid, fname):
print("SFE (integrated SFED):\n", integrate_sfed(output_sfed, np.prod(sample_grid.delta)))
writedx(output_sfed, sample_grid, fname)
print("SFED written to " + fname + ".dx")
if __name__ == "__main__":
data_path = args.directory
mol_name = args.input
#suffixes = args.tags
grids = GridCollector(mol_name, data_path)
if args.closure == "KH":
output_sfed = sfed_kh_3drism(grids.grids["HO"].grid, grids.grids["CO"].grid, grids.grids["HH1"].grid, grids.grids["CH1"].grid, rho=args.density, T=args.temperature)
epilogue(output_sfed, grids.grids["HO"], args.output)
elif args.closure == "GF":
output_sfed = sfed_gf_3drism(grids.grids["HO"].grid, grids.grids["CO"].grid, grids.grids["HH1"].grid, grids.grids["CH1"].grid, rho=args.density, T=args.temperature)
epilogue(output_sfed, grids.grids["HO"], args.output)
elif args.closure == "HNC":
output_sfed = sfed_hnc_3drism(grids.grids["HO"].grid, grids.grids["CO"].grid, grids.grids["HH1"].grid, grids.grids["CH1"].grid, rho=args.density, T=args.temperature)
epilogue(output_sfed, grids.grids["HO"], args.output)
elif args.closure.startswith("PSE"):
if args.term is None:
parser.error("PSE-n closure requires a value for -n")
else:
output_sfed = sfed_psen_3drism(grids.grids["HO"].grid, grids.grids["CO"].grid, grids.grids["HH1"].grid, grids.grids["CH1"].grid, grids.grids["UO"].grid,grids.grids["UH1"].grid, float(args.term), rho=args.density, T=args.temperature)
epilogue(output_sfed, grids.grids["HO"], args.output)
else:
raise Exception("Unknown closure")
``` |
{
"source": "2b1a4d/RSA-with-GUI",
"score": 3
} |
#### File: 2b1a4d/RSA-with-GUI/RSA.py
```python
import base64
import rsa
import tkinter
from tkinter.filedialog import asksaveasfilename
from tkinter.filedialog import askopenfilename
# Set the key length
def get_key_length():
global key_length
key_length = input_key_length.get()
key_length = int(key_length)
# Generate a key pair according to the configured key length
def generate_key():
global key_length
public_key,private_key = rsa.newkeys(key_length)
    # Save the public key and the private key
public_key = public_key.save_pkcs1()
file_public_key = open(asksaveasfilename(title = '保存公钥')+'.txt','wb')
file_public_key.write(public_key)
private_key = private_key.save_pkcs1()
file_private_key = open(asksaveasfilename(title = '保存私钥')+'.txt','wb')
file_private_key.write(private_key)
    # Close the files
file_public_key.close()
file_private_key.close()
# Load the public key
def get_public_key():
global public_key
file_public_key = open(askopenfilename(),"rb")
file_public_key = file_public_key.read()
public_key = rsa.PublicKey.load_pkcs1(file_public_key)
# Load the private key
def get_private_key():
global private_key
file_private_key = open(askopenfilename(),"rb")
file_private_key = file_private_key.read()
private_key = rsa.PrivateKey.load_pkcs1(file_private_key)
# Encrypt with the public key
def encrypt():
global public_key
    # Read the plaintext box, encode it as UTF-8 and encrypt it into ciphertext with the loaded public key
plain_text = io_plain_text.get(index1=0.0,index2="end")
plain_text = rsa.encrypt(plain_text.encode("UTF-8"),public_key)
    # Base64-encode the ciphertext bytes and write them to the ciphertext box
plain_text = base64.b64encode(plain_text)
plain_text = plain_text.decode("UTF-8")
io_cipher_text.insert(0.0,plain_text)
# Decrypt with the private key
def decrypt():
global private_key
    # Read the base64 text from the ciphertext box and decode it back into the original ciphertext bytes
cipher_text = io_cipher_text.get(index1=0.0,index2="end")
cipher_text = base64.b64decode(cipher_text)
    # Decrypt the ciphertext with the loaded private key, decode it as UTF-8 and write it to the plaintext box
cipher_text = rsa.decrypt(cipher_text,private_key)
cipher_text = cipher_text.decode("UTF-8")
io_plain_text.insert(0.0,cipher_text)
# Clear the plaintext box
def delete_plain_text():
io_plain_text.delete(index1=0.0,index2="end")
# Clear the ciphertext box
def delete_cipher_text():
io_cipher_text.delete(index1=0.0, index2="end")
# GUI layout
window = tkinter.Tk()
window.title('PC端简易RSA')
window.minsize(600,400)
# Key-related controls
input_key_length = tkinter.Spinbox(window,values = ('未选择','1024','2048','4096'),command = get_key_length)
input_key_length.place(x=50,y=25)
output_key = tkinter.Button(window,text = "生成一对密钥",width = 12,height = 1,command = generate_key)
output_key.place(x=225,y=20)
input_public_key = tkinter.Button(window,text = "导入公钥",width = 12,height = 1,command = get_public_key)
input_public_key.place(x=325,y=20)
input_private_key = tkinter.Button(window,text = "导入私钥",width = 12,height = 1,command = get_private_key)
input_private_key.place(x=425,y=20)
# Plaintext box section
io_plain_text = tkinter.Text(window,width = 60,height = 6)
io_plain_text.place(x=120,y=80)
use_public_key = tkinter.Button(window,text = "用公钥加密",width = 10,height = 2,command = encrypt)
use_public_key.place(x=25,y=80)
clear_plain_text = tkinter.Button(window,text = "清空明文框",width = 10,height = 2,command = delete_plain_text)
clear_plain_text.place(x=25,y=130)
# Ciphertext box section
io_cipher_text = tkinter.Text(window,width = 60,height = 6)
io_cipher_text.place(x=120,y=250)
use_private_key = tkinter.Button(window,text = "用私钥解密",width = 10,height = 2,command = decrypt)
use_private_key.place(x=25,y=250)
clear_cipher_text = tkinter.Button(window,text = "清空密文框",width = 10,height = 2,command = delete_cipher_text)
clear_cipher_text.place(x=25,y=300)
window.mainloop()
``` |
{
"source": "2B1S/heart.io-backend",
"score": 3
} |
#### File: src/utils/convert_for_tf.py
```python
from keras.models import model_from_json
from keras import backend as K
import keras
import tensorflow as tf
import os
from shutil import rmtree
def convert_for_tf(modelpath, weightspath, export_path, clear_converted=False):
K.set_learning_phase(0)
model = None
with open(modelpath, "r") as file:
loaded_json = file.read()
model = model_from_json(loaded_json)
model.load_weights(weightspath)
if clear_converted and os.path.exists(export_path):
rmtree(export_path)
with K.get_session() as sess:
tf.saved_model.simple_save(
sess,
export_path,
inputs={ 'input_image_bytes': model.input },
outputs={ t.name: t for t in model.outputs }
)
if __name__ == "__main__":
print('Converting Keras model for use with Tensorflow...')
convert_for_tf(
modelpath='../ml-data/keras-files/skin-model.json',
weightspath='../ml-data/keras-files/skin-model.h5',
export_path='../ml-data/tf_export',
clear_converted=True
)
print('Done!')
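    # The exported SavedModel can then be inspected with TensorFlow's CLI, e.g.
    # (illustrative): saved_model_cli show --dir ../ml-data/tf_export --all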
``` |
{
"source": "2B5/ia-3B5",
"score": 3
} |
#### File: demos/nltk/nltk_demo.py
```python
from nltk.tokenize import TweetTokenizer
from nltk.stem import WordNetLemmatizer
# https://stackoverflow.com/questions/1902967/nltk-how-to-find-out-what-corpora-are-installed-from-within-python
import sys
import codecs
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
file_text = open("../source.txt", 'r').read()
#nltk.download
#if wordnet corpus not present
def nltk_tokenize(text):
tokens = []
tknzr = TweetTokenizer()
tokens = tknzr.tokenize(text)
return tokens
def nltk_lemmatize(tokens):
wnl = WordNetLemmatizer()
for i in range(len(tokens)):
tokens[i] = wnl.lemmatize(tokens[i])
return tokens
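# Illustrative example: nltk_lemmatize(['cats', 'running']) returns
# ['cat', 'running']; without a POS tag WordNetLemmatizer treats every token
# as a noun, so only 'cats' is reduced to its lemma.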
if __name__ == '__main__':
tokens = nltk_tokenize(file_text)
print 'tokenized: ', tokens
tokens = nltk_lemmatize(tokens)
print 'lemmatized', tokens
```
#### File: demos/nltk/test.py
```python
import unittest
import nltk_demo
from sys import flags
from sys import argv
# 'and' operator shortcircuits by default if first condition fails
# if first condition succeeds, second operand won't have index out of range
if len(argv) > 1 and argv[1] == '-v':
verbose = True
else:
verbose = flags.verbose
class MyTest(unittest.TestCase):
def test_lemmatize(self):
if not verbose:
print '\n --- Testing function nltk_lemmatize() ----'
self.assertEqual(nltk_demo.nltk_lemmatize(['Hello', ',', 'my', 'name', 'is']), ['Hello', ',', 'my', 'name', 'is'])
def test_tokenize(self):
if not verbose:
print '\n --- Testing function nltk_tokenize() ----'
self.assertEqual(nltk_demo.nltk_tokenize('Hello, my name is'), ['Hello', ',', 'my', 'name', 'is'])
if __name__ == '__main__':
unittest.main()
```
#### File: demos/pycore/pycore_demo.py
```python
from pycorenlp import StanfordCoreNLP
import sys
import codecs
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
import urllib2 as url2
import zipfile
import os
import subprocess
import time
def download_file(url):
# Open the url
try:
f = url2.urlopen(url)
print "downloading " + url
with open(os.path.basename(url), "wb") as local_file:
local_file.write(f.read())
except url2.HTTPError, e:
print "HTTP Error:", e.code, url
except url2.URLError, e:
print "URL Error:", e.reason, url
def unzip_file(path):
path_noext = os.path.splitext(path)[0]
zip_ref = zipfile.ZipFile(path, 'r')
zip_ref.extractall()
zip_ref.close()
def open_pycore_server():
if os.path.exists('./stanford-corenlp-full-2016-10-31.zip'): #isfile
print 'Archive Present'
else:
download_file('http://nlp.stanford.edu/software/stanford-corenlp-full-2016-10-31.zip')
if os.path.exists('./stanford-corenlp-full-2016-10-31'): #isdir
print 'Archive unarchived'
else:
unzip_file('stanford-corenlp-full-2016-10-31.zip')
#subprocess.call('run_stanford_corenlp_server.bat', os.P_NOWAIT, shell=True)
#os.spawnl(os.P_DETACH, '...')
p = subprocess.Popen('run_stanford_corenlp_server.bat', creationflags=subprocess.CREATE_NEW_CONSOLE)
time.sleep(4) # find workaround - ~4s for win 8.1, core i5
def corenlp_tokenize(text):
nlp = StanfordCoreNLP('http://localhost:9000')
output = nlp.annotate(text, properties={
'annotators': 'tokenize,ssplit,pos,depparse,parse',
'outputFormat': 'json'
})
print(output['sentences'][0]['parse'])
return output
if __name__ == '__main__':
open_pycore_server()
file_text = open("../source.txt", 'r').read()
#print 'original: ', file_text
tokens = corenlp_tokenize(file_text)
print 'tokenized: ', tokens
```
#### File: module2/Bot/bot_user_session.py
```python
import cherrypy
import aiml
import os
class Response(object):
def __init__(self):
self.kernel = aiml.Kernel()
self.kernel.learn("startup.xml")
self.kernel.respond("load aiml")
self.question = Question()
def _cp_dispatch(self, vpath):
if len(vpath) == 1:
cherrypy.request.params['uid'] = vpath.pop()
return self
if len(vpath) == 2:
vpath.pop(0)
cherrypy.request.params['question'] = vpath.pop(0)
return self.question
return vpath
@cherrypy.expose
@cherrypy.tools.json_out()
def index(self, question, uid):
if os.path.isfile(str(uid) + ".brn"):
self.kernel.bootstrap(brainFile=str(uid) + ".brn")
else:
self.kernel.bootstrap(learnFiles="startup.xml", commands="load aiml")
self.kernel.saveBrain(str(uid) + ".brn")
return {'response': self.kernel.respond(question,uid)}
class Question(object):
def __init__(self):
self.kernel = aiml.Kernel()
self.kernel.learn("startup.xml")
self.kernel.respond("load aiml")
@cherrypy.expose
@cherrypy.tools.json_out()
def index(self, question):
return {'response': self.kernel.respond(question)}
if __name__ == '__main__':
cherrypy.quickstart(Response())
config = {'/':
{
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': False,
}
}
cherrypy.tree.mount(Response(), config=config)
```
#### File: module3/preprocessing/detectLanguage.py
```python
from langdetect import detect, language, detect_langs, DetectorFactory
from textblob import TextBlob
language_validation_limit= language.Language('en',0.8)
DetectorFactory.seed = 0
def detectLang(text):
result = detect_langs(text)
print(result)
lang = detect(text)
probable_language = result[0]
if lang=='en' and probable_language > language_validation_limit:
return 'en'
else:
return 'other'
def translate(text):
# Method two with translate
txt=TextBlob(text)
myTxt=txt.translate(to="en")
if myTxt==txt:
print("It's english")
else:
print("Is another language")
#print (detectLang("Is just a text to test a request what is wrong with you?"))
```
#### File: module3/preprocessing/errorCorrect.py
```python
from textblob import TextBlob,Word
def correct(text):
t = TextBlob(text)
return str(t.correct())
def spellcheck(text):
txt=["She","is","mw","moom"]
for w in txt:
word=Word(w)
print(word.spellcheck())
```
#### File: module3/preprocessing/errorCorrect_test.py
```python
import unittest
import errorCorrect
from sys import argv
from sys import flags
if len(argv) > 1 and argv[1] == '-v':
verbose = True
else:
verbose = flags.verbose
class MyTest(unittest.TestCase):
def test_correct(self):
if not verbose:
print('\n --- Testing function correct("She is mw moom"). Should return "The is my room" ----')
self.assertEqual(errorCorrect.correct('She is mw moom.'), 'The is my room')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "2ba2/fisher",
"score": 3
} |
#### File: app/forms/auth.py
```python
from wtforms import StringField, PasswordField, Form
from wtforms.validators import Length, Email, \
ValidationError, EqualTo
from .base import DataRequired
from app.models.user import User
class EmailForm(Form):
email = StringField('电子邮件', validators=[DataRequired(), Length(1, 64),
Email(message='电子邮箱不符合规范')])
class ResetPasswordForm(Form):
password1 = PasswordField('<PASSWORD>', validators=[
DataRequired(), Length(6, 20, message='密码长度至少需要在6到20个字符之间'),
EqualTo('password2', message='两次输入的密码不相同')])
password2 = PasswordField('<PASSWORD>密码', validators=[
DataRequired(), Length(6, 20)])
class ChangePasswordForm(Form):
old_password = PasswordField('<PASSWORD>', validators=[DataRequired()])
new_password1 = PasswordField('<PASSWORD>', validators=[
DataRequired(), Length(6, 10, message='密码长度至少需要在6到20个字符之间'),
EqualTo('new_password2', message='两次输入的密码不一致')])
new_password2 = PasswordField('<PASSWORD>', validators=[DataRequired()])
class LoginForm(EmailForm):
password = PasswordField('密码', validators=[
DataRequired(message='密码不可以为空,请输入你的密码')])
class RegisterForm(EmailForm):
nickname = StringField('昵称', validators=[
DataRequired(), Length(2, 10, message='昵称至少需要两个字符,最多10个字符')])
password = PasswordField('密码', validators=[
DataRequired(), Length(6, 20)])
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('电子邮件已被注册')
def validate_nickname(self, field):
if User.query.filter_by(nickname=field.data).first():
raise ValidationError('昵称已存在')
```
#### File: app/libs/enums.py
```python
from enum import Enum
class PendingStatus(Enum):
    """Transaction status"""
waiting = 1
success = 2
reject = 3
redraw = 4
# gifter_redraw = 5
@classmethod
def pending_str(cls, status, key):
key_map = {
cls.waiting: {
'requester': '等待对方邮寄',
'gifter': '等待你邮寄'
},
cls.reject: {
'requester': '对方已拒绝',
'gifter': '你已拒绝'
},
cls.redraw: {
'requester': '你已撤销',
'gifter': '对方已撤销'
},
cls.success: {
'requester': '对方已邮寄',
'gifter': '你已邮寄,交易完成'
}
}
return key_map[status][key]
class GiftStatus(Enum):
waiting = 0
success = 1
redraw = 2
```
#### File: app/libs/http.py
```python
import requests
class HTTP:
    # classic classes vs. new-style classes
@staticmethod
def get(url, return_json=True):
r = requests.get(url)
# restful
# json
if r.status_code != 200:
return {} if return_json else ''
return r.json() if return_json else r.text
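    # Illustrative usage (sketch): HTTP.get(url) returns the response body parsed
    # as JSON (a dict), or {} when the status code is not 200;
    # HTTP.get(url, return_json=False) returns the raw response text instead.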
class Http(object):
def __init__(self, url):
self.url = url
@staticmethod
def get(url, json_return=True):
r = requests.get(url)
if r.status_code != 200:
return {} if json_return else ''
return r.json() if json_return else r.text
```
#### File: app/view_models/book.py
```python
from app.libs.helper import get_isbn
class BookViewModel:
def __init__(self, data):
        # if not isinstance(data, dict):
# author = data.author
# data = data.__dict__
# data['author'] = author
self.title = data['title']
self.author = '、'.join(data['author'])
self.binding = data['binding']
self.publisher = data['publisher']
self.image = data['image']
self.price = '¥' + data['price'] if data['price'] else data['price']
self.isbn = get_isbn(data)
self.pubdate = data['pubdate']
self.summary = data['summary']
self.pages = data['pages']
@property
def intro(self):
intros = filter(lambda x: True if x else False,
[self.author, self.publisher, self.price])
return ' / '.join(intros)
class BookCollection:
def __init__(self):
self.total = 0
self.books = []
self.keyword = None
def fill(self, yushu_book, keyword):
self.total = yushu_book.total
self.books = [BookViewModel(book) for book in yushu_book.books]
self.keyword = keyword
class BookViewModelOld:
@classmethod
def from_api(cls, keyword, data):
        '''
        Why not build the view model inside the spider?
        Because the data fetched from Douban may be a single book or a
        collection of books.
        `data` comes in three forms:
        1. a single book
        2. an empty object
        3. an object with results
        '''
# if not data:
yushu_books = data.get('books', 'null')
if yushu_books == 'null':
total = 1
temp_books = [data]
else:
if len(yushu_books) > 0:
total = data['total']
temp_books = yushu_books
else:
total = 0
temp_books = []
books = []
for book in temp_books:
book = cls.get_detail(book, 'from_api')
books.append(book)
# douban_books = result['books'] if result.get('books') else [result]
view_model = {
'total': total,
'keyword': keyword,
'books': books
}
return view_model
@classmethod
def single_book_from_mysql(cls, keyword, data):
count = 1
if not data:
count = 0
returned = {
'total': count,
'keyword': keyword,
'books': [cls.get_detail(data)]
}
return returned
@classmethod
def get_detail(cls, data, from_where='from_mysql'):
if from_where == 'from_api':
book = {
'title': data['title'],
'author': '、'.join(data['author']),
'binding': data['binding'],
'publisher': data['publisher'],
'image': data['images']['large'],
'price': data['price'],
'isbn': data['isbn'],
'pubdate': data['pubdate'],
'summary': data['summary'],
'pages': data['pages']
}
else:
book = {
'title': data['title'],
'author': '、'.join(data['author']),
'binding': data['binding'],
'publisher': data['publisher'],
'image': data.image,
'price': data['price'],
'isbn': data.isbn,
'pubdate': data['pubdate'],
'summary': data['summary'],
'pages': data['pages']
}
return book
# @classmethod
# def get_isbn(cls, book):
# isbn13 = book.get('isbn13', None)
# isbn10 = book.get('isbn10', None)
# return isbn13 if isbn13 else (isbn10 if isbn10 else '')
``` |
{
"source": "2baOrNot2ba/iLiSA",
"score": 2
} |
#### File: ilisa/monitorcontrol/_rem_exec.py
```python
import subprocess
import os
try:
import paramiko
IMPORTED_PARAMIKO = True
except ImportError:
IMPORTED_PARAMIKO = False
def _exec_rem(remnode, cmdline, stdoutdir, nodetype='LCU',
background_job=False, dryrun=False, accessible=False, quotes="'",
verbose=True):
return _exec_ssh(remnode, cmdline, stdoutdir, nodetype=nodetype,
background_job=background_job, dryrun=dryrun,
accessible=accessible, quotes=quotes, verbose=verbose)
def _exec_ssh(nodeurl, cmdline, stdoutdir='~', nodetype='LCU',
background_job=False, dryrun=False, accessible=False, quotes="'",
verbose=True):
"""Execute a command on the remnode, either as a background job or in the
foreground (blocking). Typically access is remote via ssh.
(To speed things up use the ssh CommandMaster option.)
"""
nodeprompt = "On " + nodetype + "> "
if nodeurl.endswith('localhost'):
shellinvoc = ''
quotes = ''
else:
shellinvoc = "ssh " + nodeurl
output = None
if background_job:
# Currently only run_beamctl & run_tbbctl run in background
# Put stdout & stderr in log in dumpdir
cmdline = ("(( " + cmdline + " ) > " + stdoutdir
+ "lcu_shell_out.log 2>&1) &")
if dryrun:
pre_prompt = "(dryrun) "
else:
pre_prompt = ""
if verbose:
print(pre_prompt + nodeprompt + cmdline)
if (not dryrun) and accessible:
if background_job == 'locally':
# Runs in background locally rather than in background on LCU
output = subprocess.run(shellinvoc + " " + cmdline + " &",
shell=True, stdout=subprocess.PIPE).stdout
else:
output = subprocess.run(shellinvoc + " "
+ quotes + cmdline + quotes,
shell=True, universal_newlines = True,
stdout=subprocess.PIPE).stdout
if output:
output = output.rstrip()
elif not accessible:
print("Warning: not running as " + nodeurl
+ " since it is not accesible.")
return output
def __exec_lcu_paramiko(self, cmdline, backgroundJOB=False):
lcuprompt = "LCUp>"
if self.DryRun:
preprompt = "(dryrun)"
else:
preprompt = ""
if backgroundJOB is True:
cmdline = "(( " + cmdline + " ) > " + self._home_dir +\
"lofarctl.log 2>&1) &"
if self.verbose:
print("{} {} {}".format(preprompt, lcuprompt, cmdline))
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
ssh_config = paramiko.SSHConfig()
user_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(user_config_file):
with open(user_config_file) as f:
ssh_config.parse(f)
cfg = {'hostname': self.hostname, 'username': self.user}
user_config = ssh_config.lookup(cfg['hostname'])
for k in ('hostname', 'username', 'port'):
if k in user_config:
cfg[k] = user_config[k]
if 'proxycommand' in user_config:
cfg['sock'] = paramiko.ProxyCommand(user_config['proxycommand'])
client.connect(**cfg)
stdin, stdout, stderr = client.exec_command(cmdline)
print(stdout.read())
client.close()
def __stdout_ssh(nodeurl, cmdline, nodetype='LCU', dryrun=False,
verbose=True):
"""Execute a command on the remnode and return its output."""
nodeprompt = "On " + nodetype + "> "
shellinvoc = "ssh " + nodeurl
if dryrun:
prePrompt = "(dryrun) "
else:
prePrompt = ""
if verbose:
print(prePrompt + nodeprompt + cmdline)
if not(dryrun):
try:
output = subprocess.check_output(shellinvoc + " '" + cmdline + "'",
shell=True).rstrip()
output = str(output.decode('UTF8'))
except subprocess.CalledProcessError as e:
raise e
else:
output = "None"
return output
def __outfromLCU(self, cmdline, integration, duration):
"""Execute a command on the LCU and monitor progress."""
LCUprompt = "LCUo> "
shellinvoc = "ssh " + self.lcuURL
if self.DryRun:
prePrompt = "(dryrun) "
else:
prePrompt = ""
if self.verbose:
print(prePrompt+LCUprompt+cmdline)
if self.DryRun is False:
cmd = subprocess.Popen(shellinvoc+" '"+cmdline+"'",
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
else:
return None
count = 0
outstrname = 'stderr'
while cmd.poll() is None:
if outstrname == 'stdout':
outstr = cmd.stdout
elif outstrname == 'stderr':
outstr = cmd.stderr
else:
raise ValueError("Unknown output name {}".format(outstrname))
try:
got = cmd.stderr.readline().decode('utf8')
except IOError:
raise IOError()
else:
# print got
if "shape(stats)=" in got:
if count % 2 == 0:
print(str(int(round(duration-count/2.0*integration, 0)
)) + "sec left out of " + str(duration))
count += 1
if __name__ == '__main__':
import sys
hn = _exec_ssh(sys.argv[1], sys.argv[2], accessible=True)
print(hn)
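    # Illustrative invocation:  python _rem_exec.py localhost hostname
    # The 'localhost' node URL skips the ssh wrapper, so 'hostname' is run
    # locally and its output is printed; any other URL is executed over ssh.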
``` |
{
"source": "2baOrNot2ba/Myra",
"score": 3
} |
#### File: dreambeam/rime/scenarios.py
```python
import numpy as np
import dreambeam.rime.jones
from dreambeam.telescopes.rt import load_mountedfeed
from dreambeam.rime.conversion_utils import basis2basis_transf, C09toIAU
def on_pointing_axis_tracking(telescopename, stnid, band, antmodel, obstimebeg,
obsdur, obstimestp, pointingdir,
do_parallactic_rot=True):
"""Computes the Jones matrix along pointing axis while tracking a fixed
celestial source.
This function computes the Jones of an observational setup where a
telescope station is tracking a source (on-axis) on the sky using one of
its bands.
The Jones matrix computed is the matrix which maps the two transverse
components of the E-field vector (V/m or unitless) at a given frequency
propagating from the pointing direction to the telescope stations two
polarization channels. The x,y components of the source are as specified by
the IAU for polarized emissions; and the output components are the two
ordered polarization channels. The matrix inverse of the output Jones
matrix multiplied from the left on the channelized 2D voltage data will
produce an estimate of the E-field vector of the source in the IAU for all
the frequencies in the band over the times of the tracking.
The response of the dual-polarized feed is modeled using the `antmodel`
specified.
Parameters
----------
telescopename : str
Name of telescope, as registered in TelescopesWiz() instance.
stnid : str
Name or ID of the station, as registered in TelescopesWiz() instance.
band : str
Name of band, as registered in TelescopesWiz() instance.
antmodel : str
Name of antenna model, e.g. 'Hamaker', as registered in TelescopesWiz()
instance.
obstimebeg : datetime.datetime
Date-time when the tracking observation begins.
    obsdur : datetime.timedelta
Duration of the entire tracking observation in seconds. The sample
at obstimebeg+duration is included.
    obstimestp : datetime.timedelta
Time step in seconds for which the jones matrix should be sampled at.
pointingdir : (float, float, str)
Length 3 tuple encoding the tracking direction on the celestial sphere.
The last tuple element should usually be 'J2000', in which case the
the first two tuple elements are the right ascension and declination,
respectively, in radians.
do_parallactic_rot : bool (optional)
Whether of not to perform parallactic rotation (default True).
Returns
-------
timespy : array
The python datetime of the samples.
freqs : array
The frequency [Hz] at which the Jones was computed.
jones : array_like
Array over time steps and frequencies of the Jones matrix corresponding
to RIME for this set of input parameters.
jonesobj : Jones(object)
Resulting instance of Jones(). jonesobj.jones is a copy of the
other return variable: jones. In addition, it has more info regarding
this Jones matrix such as its basis.
Notes
-----
Specifically, this function only considers an RIME consisting of the
projection from the celestial (Earth-Centered-Inertial) frame to the
topocentric station frame, and the polarimetric responce of the
dual-polarized antenna feed. These are known as the P-Jones and the E-Jones
respectively.
Examples
--------
Here is an example where Cassiopeia A was tracked with the Swedish
LOFAR telescope HBA band starting at '2019-08-03T12:00:00' and lasting
24 hours (86400s) sampled every hour (3600s):
>>> from dreambeam.rime.scenarios import *
    >>> from datetime import datetime, timedelta
>>> obstimebeg = datetime.strptime('2019-08-30T12:00:00',
... "%Y-%m-%dT%H:%M:%S")
    >>> duration = timedelta(hours=24)
    >>> obstimestp = timedelta(hours=1)
>>> pointingdir = (6.11, 1.02, 'J2000')
>>> samptimes, freqs, jones, jonesobj = on_pointing_axis_tracking('LOFAR',
    ...     'SE607', 'HBA', 'Hamaker', obstimebeg, duration, obstimestp,
... pointingdir)
>>> print(jones.shape)
(1024, 25, 2, 2)
>>> print(freqs.shape)
(1024,)
>>> print(samptimes.shape)
(25,)
>>> print(jones[512,0,:,:])
[[ 0.35038040-0.02403619j 0.46850486+0.01755369j]
[ 0.39298880-0.02620409j -0.38686167-0.01691861j]]
"""
# *Setup Source*
srcfld = dreambeam.rime.jones.DualPolFieldPointSrc(pointingdir)
# *Load moouted feed*
stnfeed = load_mountedfeed(telescopename, stnid, band, antmodel)
stnrot = stnfeed.stnRot
# *Setup PJones*
timespy = []
nrTimSamps = int((obsdur.total_seconds()/obstimestp.seconds))+1
for ti in range(0, nrTimSamps):
timespy.append(obstimebeg+ti*obstimestp)
pjones = dreambeam.rime.jones.PJones(timespy, np.transpose(stnrot),
do_parallactic_rot=do_parallactic_rot)
# *Setup EJones*
ejones = stnfeed.getEJones(pointingdir)
freqs = stnfeed.getfreqs()
# *Setup MEq*
pjonesOfSrc = pjones.op(srcfld)
jonesobj = ejones.op(pjonesOfSrc)
# Get the resulting Jones matrices
# (structure is Jn[freqIdx, timeIdx, chanIdx, compIdx] )
jones = jonesobj.getValue()
if not do_parallactic_rot:
basis_from = jonesobj.get_basis()
basis_to = pjonesOfSrc.get_basis()
btransmat2d = basis2basis_transf(basis_from, basis_to)[..., 1:, 1:]
# Tranformation from IAU2C09 has to be (right) transformed back 1st
transmat2d = np.matmul(C09toIAU[1:, 1:], btransmat2d)
jones = np.matmul(jones, transmat2d)
jonesobj.jones = jones
return timespy, freqs, jones, jonesobj
def primarybeampat(telescopename, stnid, band, antmodel, freq,
pointing=(0., np.pi/2, 'STN'), obstime=None, lmgrid=None):
"""Computes the Jones matrix over the beam fov for pointing.
"""
# Get the telescopeband instance:
stnfeed = load_mountedfeed(telescopename, stnid, band, antmodel)
stnrot = stnfeed.stnRot
# *Setup Source*
(az, el, refframe) = pointing
srcfld = dreambeam.rime.jones.DualPolFieldRegion(refframe, iaucmp=False,
lmgrid=lmgrid)
# *Setup Parallatic Jones*
pjones = dreambeam.rime.jones.PJones([obstime], np.transpose(stnrot))
# *Setup EJones*
# **Select frequency
freqs = stnfeed.getfreqs()
frqIdx = np.where(np.isclose(freqs, freq, atol=190e3))[0][0]
# N.B. Ejones doesn't really use pointing
ejones = stnfeed.getEJones(pointing, [freqs[frqIdx]])
# *Setup MEq*
pjones_src = pjones.op(srcfld)
if refframe == dreambeam.rime.jones.Jones._topo_frame:
j2000basis = pjones_src.jonesbasis
# Since refFrame is STN, pjones is inverse of ordinary J2000 to STN.
# By inverting it, one gets the ordinary conversion back.
pjones_src = dreambeam.rime.jones.inverse(pjones_src)
else:
j2000basis = srcfld.jonesbasis
res = ejones.op(pjones_src)
# Because we started off with iaucmp=False, but want IAU components:
res.convert2iaucmp()
dreambeam.rime.jones.fix_imaginary_directions(res)
# NOTE: Not using get_basis() method, in order to get station basis instead
# of antenna basis:
stnbasis = res.jonesbasis
# Get the resulting Jones matrices
# (structure is Jn[freqIdx, timeIdx, chanIdx, compIdx] )
res_jones = res.getValue()
return res_jones, stnbasis, j2000basis
``` |
{
"source": "2baOrNot2ba/SWHT",
"score": 2
} |
#### File: SWHT/SWHT/ft.py
```python
import numpy as np
import ephem
import sys,os
import struct
import time
##### START: These function are not used anymore
def phsCenterSrc(obs, t):
"""return an ephem FixedBody source based on the time offset from the obs"""
src = ephem.FixedBody()
t0 = obs.date
obs.date = t
src._ra = obs.sidereal_time()
src._dec = obs.lat
obs.date = t0
return src
def eq2top_m(ha, dec):
"""Return the 3x3 matrix converting equatorial coordinates to topocentric
at the given hour angle (ha) and declination (dec)."""
sin_H, cos_H = np.sin(ha), np.cos(ha)
sin_d, cos_d = np.sin(dec), np.cos(dec)
zero = np.zeros_like(ha)
map = np.array( [[ sin_H , cos_H , zero ],
[ -sin_d*cos_H, sin_d*sin_H, cos_d ],
[ cos_d*cos_H, -cos_d*sin_H, sin_d ]])
if len(map.shape) == 3: map = map.transpose([2, 0, 1])
return map
def get_baseline(i, j, src, obs):
"""Return the baseline corresponding to i,j"""
bl = j - i
try:
if src.alt < 0:
raise PointingError('Phase center below horizon')
m=src.map
except(AttributeError):
ra,dec = src._ra,src._dec
#opposite HA since the we want to move the source at zenith away to phase to the original zenith source
m = eq2top_m(ra-obs.sidereal_time(), dec)
#normal HA
#m = eq2top_m(obs.sidereal_time() - ra, dec)
return np.dot(m, bl).transpose()
def gen_uvw(i, j, src, obs, f):
"""Compute uvw coordinates of baseline relative to provided FixedBody"""
x,y,z = get_baseline(i,j,src,obs)
afreqs = np.reshape(f, (1,f.size))
afreqs = afreqs/ephem.c #1/wavelength
if len(x.shape) == 0: return np.array([x*afreqs, y*afreqs, z*afreqs]).T
x.shape += (1,); y.shape += (1,); z.shape += (1,)
return np.array([np.dot(x,afreqs), np.dot(y,afreqs), np.dot(z,afreqs)]).T
def xyz2uvw(xyz, src, obs, f):
"""Return an array of UVW values"""
uvw = np.zeros((f.shape[0], xyz.shape[0], xyz.shape[0], 3))
for i in range(xyz.shape[0]):
for j in range(xyz.shape[0]):
if i==j: continue
uvw[:, i, j, :] = gen_uvw(xyz[i], xyz[j], src, obs, f)[:,0,:]
return uvw
##### STOP: These function are not used anymore
def dft2(d, l, m, u, v, psf=False):
"""compute the 2d DFT for position (m,l) based on (d,uvw)"""
if psf: return np.sum(np.exp(2.*np.pi*1j*((u*l) + (v*m))))/u.size
else: return np.sum(d*np.exp(2.*np.pi*1j*((u*l) + (v*m))))/u.size
def dftImage(d, uvw, px, res, mask=False, rescale=False, stokes=False):
"""return a DFT image
d: complex visibilities [F, Q] F frequency subbands, Q samples
uvw: visibility sampling in units of wavelengths [Q, 3]
px: [int, int], number of pixels in image
res: float, resolution of central pixel in radians
rescale: account for missing np.sqrt(1-l^2-m^2) in flat-field approximation
"""
if stokes: im = np.zeros((px[0], px[1], 4),dtype=complex)
else: im = np.zeros((px[0], px[1]), dtype=complex)
maskIm = np.zeros((px[0], px[1]), dtype=bool)
mid_m = int(px[0]/2.) #middle pixel number in m direction
mid_l = int(px[1]/2.) #middle pixel number in l direction
u = np.array(uvw[:,0])
v = np.array(uvw[:,1])
w = np.array(uvw[:,2])
#fov = [px[0]*res*(180./np.pi), px[1]*res*(180./np.pi)] #Field of View in degrees
#set (l,m) range based on the number of pixels and resolution
lrange = np.linspace(-1.*px[0]*res/2., px[0]*res/2., num=px[0], endpoint=True)/(np.pi/2.) #m range (-1,1)
mrange = np.linspace(-1.*px[1]*res/2., px[1]*res/2., num=px[1], endpoint=True)/(np.pi/2.) #l range (-1,1)
start_time = time.time()
for mid,m in enumerate(mrange):
for lid,l in enumerate(lrange):
#rescale to account for missing np.sqrt(1-l^2-m^2) in flat-field approximation
if rescale: scale = np.sqrt(1.-(l**2.)-(m**2.))
else: scale = 1.
if stokes:
im[lid,mid,0] = dft2(d[0], l, m, u, v) * scale
im[lid,mid,1] = dft2(d[1], l, m, u, v) * scale
im[lid,mid,2] = dft2(d[2], l, m, u, v) * scale
im[lid,mid,3] = dft2(d[3], l, m, u, v) * scale
else: im[lid,mid] = dft2(d, m, l, u, v) * scale
if mask: #mask out region beyond field of view
rad = (m**2 + l**2)**.5
if rad > 1.: maskIm[lid,mid] = True
print time.time() - start_time
im = np.flipud(np.fliplr(im)) #make top-left corner (0,0) the south-east point
maskIm = np.flipud(np.fliplr(maskIm))
if mask: return im, maskIm
else: return im
def fftImage(d, uvw, px, res, mask=False, conv='fast', wgt='natural'):
"""Grid visibilities and perform an FFT to return an image
d: complex visibilities
uvw: visibility sampling in units of wavelengths
px: [int, int], number of pixels in image
res: float, resolution of central pixel in radians
"""
start_time = time.time()
im = np.zeros((px[0], px[1]), dtype=complex)
maskIm = np.zeros((px[0], px[1]), dtype=bool)
mid_m = int(px[0]/2.) #middle pixel number in m direction
mid_l = int(px[1]/2.) #middle pixel number in l direction
u = np.array(uvw[:,0])
v = np.array(uvw[:,1])
w = np.array(uvw[:,2])
gridVis = np.zeros((px[0], px[1]), dtype=complex) #array for the gridded visibilities
gridWgt = np.ones((px[0], px[1]), dtype=float) #array for the grid weights
#u,v grid spacing based on the number of pixels and resolution of the desired image
deltau = (np.pi/2.) * 1./(px[0]*res)
deltav = (np.pi/2.) * 1./(px[1]*res)
if conv.startswith('fast'):
for did,dd in enumerate(d):
#simple, rectangular convolution function (nearest neighbor convolution)
uu = int(u[did]/deltau)
vv = int(v[did]/deltav)
gridVis[(uu+(px[0]/2))%px[0], (vv+(px[1]/2))%px[1]] += dd
else:
gridUV = np.mgrid[-.5*px[0]*deltau:.5*px[0]*deltau:deltau, -.5*px[1]*deltav:.5*px[1]*deltav:deltav]
#choose a convolution function to grid with
if conv.startswith('rect'):
convFunc = convRect(deltau, deltav)
truncDist = deltau/2. #truncate box car to within a single pixel
if conv.startswith('gauss'):
convFunc = convGauss(deltau/2., deltav/2.) #half-power point at 1/2 a UV pixel distance
truncDist = deltau*5. #truncate the convolution function to a 5 pixel radius
if conv.startswith('prolate'):
convFunc = convProlate(deltau, deltav)
truncDist = deltau #only values of sqrt(u**2 + v**2) < deltau are valid
#Grid visibilities
for uid in range(px[0]):
for vid in range(px[1]):
ucentre,vcentre = gridUV[:,uid,vid]
udiff = u-ucentre #distance between the ungridded u positon and the gridded u position
vdiff = v-vcentre #distance between the ungridded v positon and the gridded v position
idx = np.argwhere(np.sqrt((udiff**2.)+(vdiff**2.)) < truncDist) #convolution function should be truncated at a reasonable kernel size
if idx.size > 0:
gridWgt[uid,vid] = np.sum(convFunc(udiff[idx],vdiff[idx]))
gridVis[uid,vid] = np.sum(convFunc(udiff[idx],vdiff[idx]) * d[idx])
if wgt.startswith('uni'): gridVis /= gridWgt #uniform weighting, default is natural weighting
gridVis = np.fft.ifftshift(gridVis) #(0,0) position is in the middle, need to shift it to a corner
im = np.fft.ifftshift(np.fft.ifft2(gridVis)) #shift (0,0) back to the middle
im = np.fliplr(np.rot90(im)) #make top-left corner (0,0) the south-east point
print time.time() - start_time
if mask: return im, maskIm
else: return im
def convGauss(ures, vres, alpha=1.):
"""Return a Gaussian convolution function
ures,vres: distance from centre to half power point in uv distance
alpha: scaling factor"""
return lambda uu,vv: ((1./(alpha*np.sqrt(ures*vres*np.pi)))**2.)*np.exp(-1.*(uu/(alpha*ures))**2.)*np.exp(-1.*(vv/(alpha*vres))**2.)
def convRect(ures, vres):
"""Return a boxcar/rectangle convolution function"""
return lambda uu,vv: np.ones_like(uu)
def convProlate(ures, vres, aa=1., cc=1.):
"""Return a prolate spheroid function which returns the function z(uu, vv) = sqrt( cc**2. * ( 1. - (((uu*ures)**2. + (vv*vres)**2.)/aa**2.))), c > a for a prolate function"""
return lambda uu,vv: np.sqrt( cc**2. * (1. - (((uu/ures)**2. + (vv/vres)**2.)/aa**2.)))
if __name__ == '__main__':
print 'Running test cases'
#TODO: add tests
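    # Minimal smoke test (a sketch, not from the original module): unit
    # visibilities should give a DFT image that peaks at ~1 at the centre
    # pixel (l = m = 0), where the DFT kernel reduces to exp(0).
    nsamps = 32
    test_uvw = np.random.randn(nsamps, 3) * 10.
    test_vis = np.ones(nsamps, dtype=complex)
    test_im = dftImage(test_vis, test_uvw, px=[17, 17], res=0.01)
    print 'centre pixel amplitude (expect ~1.0):', np.abs(test_im[8, 8])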
print 'Made it through without any errors.'
```
#### File: SWHT/SWHT/Ylm.py
```python
import numpy as np
def xfact(m):
# computes (2m-1)!!/sqrt((2m)!)
res = 1.
    for i in range(1, 2*m+1):
if i % 2: res *= i # (2m-1)!!
res /= np.sqrt(i) # sqrt((2m)!)
return res
def lplm_n(l, m, x):
# associated legendre polynomials normalized as in Ylm, from Numerical Recipes 6.7
l,m = int(l),int(m)
assert 0<=m<=l and np.all(np.abs(x)<=1.)
norm = np.sqrt(2. * l + 1.) / np.sqrt(4. * np.pi)
if m == 0:
pmm = norm * np.ones_like(x)
else:
pmm = (-1.)**m * norm * xfact(m) * (1.-x**2.)**(m/2.)
if l == m:
return pmm
pmmp1 = x * pmm * np.sqrt(2.*m+1.)
if l == m+1:
return pmmp1
    for ll in range(m+2, l+1):
pll = (x*(2.*ll-1.)*pmmp1 - np.sqrt( (ll-1.)**2. - m**2.)*pmm)/np.sqrt(ll**2.-m**2.)
pmm = pmmp1
pmmp1 = pll
return pll
def Ylm(l, m, phi, theta):
# spherical harmonics
# theta is from 0 to pi with pi/2 on equator
l,m = int(l),int(m)
assert 0 <= np.abs(m) <=l
if m > 0:
return lplm_n(l, m, np.cos(theta)) * np.exp(1J * m * phi)
elif m < 0:
return (-1.)**m * lplm_n(l, -m, np.cos(theta)) * np.exp(1J * m * phi)
return lplm_n(l, m, np.cos(theta)) * np.ones_like(phi)
def Ylmr(l, m, phi, theta):
# real spherical harmonics
# theta is from 0 to pi with pi/2 on equator
l,m = int(l),int(m)
assert 0 <= np.abs(m) <=l
if m > 0:
return lplm_n(l, m, np.cos(theta)) * np.cos(m * phi) * np.sqrt(2.)
elif m < 0:
return (-1.)**m * lplm_n(l, -m, np.cos(theta)) * np.sin(-m * phi) * np.sqrt(2.)
return lplm_n(l, m, np.cos(theta)) * np.ones_like(phi)
if __name__ == "__main__":
from scipy.special import sph_harm
    from scipy.special import factorial2, factorial
from timeit import Timer
def ref_xfact(m):
return factorial2(2*m-1)/np.sqrt(factorial(2*m))
print "Time: xfact(10)", Timer("xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(10)", Timer("ref_xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: xfact(80)", Timer("xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(80)", Timer("ref_xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "m", "xfact", "ref_xfact"
for m in range(10) + range(80,90):
a = xfact(m)
b = ref_xfact(m)
print m, a, b
phi, theta = np.ogrid[0:2*np.pi:10j,-np.pi/2:np.pi/2:10j]
print "Time: Ylm(1,1,phi,theta)", Timer("Ylm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "Time: sph_harm(1,1,phi,theta)", Timer("sph_harm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "l", "m", "max|Ylm-sph_harm|"
for l in xrange(0,10):
for m in xrange(-l,l+1):
a = Ylm(l,m,phi,theta)
b = sph_harm(m,l,phi,theta)
print l,m, np.amax(np.abs(a-b))
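    # Hedged extra consistency check (not in the original tests): for m > 0 the
    # real harmonics should equal sqrt(2) times the real part of the complex
    # ones evaluated on the same grid.
    for l in range(1, 5):
        for m in range(1, l + 1):
            assert np.allclose(Ylmr(l, m, phi, theta),
                               np.sqrt(2.) * Ylm(l, m, phi, theta).real)
    print("Ylmr/Ylm consistency check passed")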
``` |
{
"source": "2bds/python-pinyin",
"score": 2
} |
#### File: python-pinyin/tests/test_pinyin.py
```python
from __future__ import unicode_literals
import pytest
from pypinyin import (
pinyin, slug, lazy_pinyin, load_single_dict,
load_phrases_dict, NORMAL, TONE, TONE2, TONE3, INITIALS,
FIRST_LETTER, FINALS, FINALS_TONE, FINALS_TONE2, FINALS_TONE3,
BOPOMOFO, BOPOMOFO_FIRST, CYRILLIC, CYRILLIC_FIRST
)
from pypinyin.compat import SUPPORT_UCS4
from pypinyin.core import seg
def test_pinyin_initials():
"""包含声明和韵母的词语"""
hans = '中心'
    # Default style, with tone marks
assert pinyin(hans) == [['zh\u014dng'], ['x\u012bn']]
assert pinyin(hans, strict=False) == [['zh\u014dng'], ['x\u012bn']]
    # NORMAL style, without tone marks
assert pinyin(hans, NORMAL) == [['zhong'], ['xin']]
assert pinyin(hans, NORMAL, strict=False) == [['zhong'], ['xin']]
    # TONE style: the tone mark is placed on the first letter of the final
assert pinyin(hans, TONE) == [['zh\u014dng'], ['x\u012bn']]
assert pinyin(hans, TONE, strict=False) == [['zh\u014dng'], ['x\u012bn']]
    # TONE2 style: the tone is written as a digit [1-4] right after the tone-bearing vowel
assert pinyin(hans, TONE2) == [['zho1ng'], ['xi1n']]
assert pinyin(hans, TONE2, strict=False) == [['zho1ng'], ['xi1n']]
    # TONE3 style: the tone is written as a digit [1-4] at the end of each syllable
assert pinyin(hans, TONE3) == [['zhong1'], ['xin1']]
assert pinyin(hans, TONE3, strict=False) == [['zhong1'], ['xin1']]
    # INITIALS style: return only the initial of each syllable
assert pinyin(hans, INITIALS) == [['zh'], ['x']]
assert pinyin(hans, INITIALS, strict=False) == [['zh'], ['x']]
    # FIRST_LETTER style: return only the first letter of each syllable
assert pinyin(hans, FIRST_LETTER) == [['z'], ['x']]
assert pinyin(hans, FIRST_LETTER, strict=False) == [['z'], ['x']]
    # BOPOMOFO style, with tone marks
assert pinyin(hans, BOPOMOFO) == [['ㄓㄨㄥ'], ['ㄒㄧㄣ']]
assert pinyin(hans, BOPOMOFO, strict=False) == [['ㄓㄨㄥ'], ['ㄒㄧㄣ']]
    # BOPOMOFO_FIRST style: first bopomofo symbol only
assert pinyin(hans, BOPOMOFO_FIRST) == [['ㄓ'], ['ㄒ']]
assert pinyin(hans, BOPOMOFO_FIRST, strict=False) == [['ㄓ'], ['ㄒ']]
# test CYRILLIC style
assert pinyin(hans, CYRILLIC) == [['чжун1'], ['синь1']]
assert pinyin(hans, CYRILLIC, strict=False) == [['чжун1'], ['синь1']]
# CYRILLIC_FIRST style return only first letters
assert pinyin(hans, CYRILLIC_FIRST) == [['ч'], ['с']]
assert pinyin(hans, CYRILLIC_FIRST, strict=False) == [['ч'], ['с']]
    # Enable heteronym mode
assert pinyin(hans, heteronym=True) == [['zh\u014dng', 'zh\xf2ng'],
['x\u012bn']]
assert pinyin(hans, heteronym=True, strict=False) == \
[['zh\u014dng', 'zh\xf2ng'], ['x\u012bn']]
    # FINALS style: return only the final of each syllable, without tones
assert pinyin(hans, style=FINALS) == [['ong'], ['in']]
assert pinyin(hans, style=FINALS, strict=False) == [['ong'], ['in']]
    # FINALS_TONE style: tone mark on the first letter of the final
assert pinyin(hans, style=FINALS_TONE) == [['\u014dng'], ['\u012bn']]
assert pinyin(hans, style=FINALS_TONE, strict=False) == \
[['\u014dng'], ['\u012bn']]
    # FINALS_TONE2 style: tone as a digit [1-4] after the tone-bearing vowel
assert pinyin(hans, style=FINALS_TONE2) == [['o1ng'], ['i1n']]
assert pinyin(hans, style=FINALS_TONE2, strict=False) == \
[['o1ng'], ['i1n']]
    # FINALS_TONE3 style: tone as a digit [1-4] at the end of the final
assert pinyin(hans, style=FINALS_TONE3) == [['ong1'], ['in1']]
assert pinyin(hans, style=FINALS_TONE3, strict=False) == \
[['ong1'], ['in1']]
def test_pinyin_finals():
"""只包含韵母的词语"""
hans = '嗷嗷'
assert pinyin(hans) == [['\xe1o'], ['\xe1o']]
assert pinyin(hans + 'abc') == [['\xe1o'], ['\xe1o'], ['abc']]
assert pinyin(hans, NORMAL) == [['ao'], ['ao']]
assert pinyin(hans, TONE) == [['\xe1o'], ['\xe1o']]
assert pinyin(hans, TONE2) == [['a2o'], ['a2o']]
assert pinyin(hans, TONE3) == [['ao2'], ['ao2']]
assert pinyin(hans, INITIALS) == [[''], ['']]
assert pinyin(hans, FIRST_LETTER) == [['a'], ['a']]
assert pinyin(hans, BOPOMOFO) == [['ㄠˊ'], ['ㄠˊ']]
assert pinyin(hans, BOPOMOFO_FIRST) == [['ㄠ'], ['ㄠ']]
assert pinyin(hans, CYRILLIC) == [['ао2'], ['ао2']]
assert pinyin(hans, CYRILLIC_FIRST) == [['а'], ['а']]
assert pinyin(hans, heteronym=True) == [['\xe1o'], ['\xe1o']]
assert pinyin('啊', heteronym=True) == \
[['a', 'è', 'ā', 'á', 'ǎ', 'à']]
assert pinyin(hans, style=FINALS) == [['ao'], ['ao']]
assert pinyin(hans, style=FINALS_TONE) == [['\xe1o'], ['\xe1o']]
assert pinyin(hans, style=FINALS_TONE2) == [['a2o'], ['a2o']]
assert pinyin(hans, style=FINALS_TONE3) == [['ao2'], ['ao2']]
def test_slug():
hans = '中心'
assert slug(hans) == 'zhong-xin'
assert slug(hans, heteronym=True) == 'zhong-xin'
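def test_slug_separator_and_style():
    # Hedged extra example (not part of the original suite): slug() joins the
    # lazy_pinyin() result with a configurable separator and accepts the same
    # style constants as pinyin().
    assert slug('中心', separator='_') == 'zhong_xin'
    assert slug('中心', style=FIRST_LETTER) == 'z-x'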
def test_zh_and_en():
"""中英文混合的情况"""
# 中英文
hans = '中心'
assert pinyin(hans + 'abc') == [['zh\u014dng'], ['x\u012bn'], ['abc']]
    # Fixed phrases mixing Chinese and English
assert pinyin('黄山B股', style=TONE2) == \
[['hua2ng'], ['sha1n'], ['B'], ['gu3']]
assert pinyin('A股', style=TONE2) == [['A'], ['gu3']]
assert pinyin('阿Q', style=TONE2) == [['a1'], ['Q']]
assert pinyin('B超', style=TONE2) == [['B'], ['cha1o']]
assert pinyin('AB超C', style=TONE2) == [['AB'], ['cha1o'], ['C']]
assert pinyin('AB阿C', style=TONE2) == [['AB'], ['a1'], ['C']]
assert pinyin('维生素C', style=TONE2) == \
[['we2i'], ['she1ng'], ['su4'], ['C']]
def test_others():
    # Empty string
assert pinyin('') == []
    # Single Chinese character
assert pinyin('營') == [['y\xedng']]
    # Multi-character word 中国人
assert pinyin('中国人') == [['zh\u014dng'], ['gu\xf3'], ['r\xe9n']]
    # Japanese kana
assert pinyin('の') == [['\u306e']]
    # Characters with no reading, or characters that are not yet encoded
assert pinyin('\u9fff') == [['\u9fff']]
def test_lazy_pinyin():
assert lazy_pinyin('中国人') == ['zhong', 'guo', 'ren']
assert lazy_pinyin('中心') == ['zhong', 'xin']
assert lazy_pinyin('中心', style=TONE) == ['zh\u014dng', 'x\u012bn']
assert lazy_pinyin('中心', style=INITIALS) == ['zh', 'x']
assert lazy_pinyin('中心', style=BOPOMOFO) == ['ㄓㄨㄥ', 'ㄒㄧㄣ']
assert lazy_pinyin('中心', style=CYRILLIC) == ['чжун1', 'синь1']
def test_seg():
hans = '音乐'
hans_seg = list(seg(hans))
assert pinyin(hans_seg, style=TONE2) == [['yi1n'], ['yue4']]
    # Fixed phrases mixing Chinese and English
assert pinyin('黄山B股', style=TONE2) == \
[['hua2ng'], ['sha1n'], ['B'], ['gu3']]
assert pinyin('A股', style=TONE2) == [['A'], ['gu3']]
assert pinyin('阿Q', style=TONE2) == [['a1'], ['Q']]
assert pinyin('B超', style=TONE2) == [['B'], ['cha1o']]
assert pinyin('AB超C', style=TONE2) == [['AB'], ['cha1o'], ['C']]
assert pinyin('AB阿C', style=TONE2) == [['AB'], ['a1'], ['C']]
assert pinyin('维生素C', style=TONE2) == \
[['we2i'], ['she1ng'], ['su4'], ['C']]
def test_custom_pinyin_dict():
hans = '桔'
try:
assert lazy_pinyin(hans, style=TONE2) == ['ju2']
except AssertionError:
pass
load_single_dict({ord('桔'): 'jú,jié'})
assert lazy_pinyin(hans, style=TONE2) == ['ju2']
def test_custom_pinyin_dict2():
hans = ['同行']
try:
assert lazy_pinyin(hans, style=TONE2) == ['to2ng', 'ha2ng']
except AssertionError:
pass
load_phrases_dict({'同行': [['tóng'], ['xíng']]})
assert lazy_pinyin(hans, style=TONE2) == ['to2ng', 'xi2ng']
def test_custom_pinyin_dict_tone2():
load_single_dict({ord('桔'): 'ce4,si4'}, style='tone2')
assert lazy_pinyin('桔', style=TONE2) == ['ce4']
assert pinyin('桔') == [['cè']]
def test_custom_pinyin_dict2_tone2():
load_phrases_dict({'同行': [['to4ng'], ['ku1']]}, style='tone2')
assert lazy_pinyin(['同行'], style=TONE2) == ['to4ng', 'ku1']
assert pinyin('同行') == [['tòng'], ['kū']]
def test_errors():
hans = (
('啊', {'style': TONE2}, ['a']),
('啊a', {'style': TONE2}, ['a', 'a']),
('⺁', {'style': TONE2}, ['\u2e81']),
('⺁', {'style': TONE2, 'errors': 'ignore'}, []),
('⺁', {'style': TONE2, 'errors': 'replace'}, ['2e81']),
('⺁⺁', {'style': TONE2, 'errors': 'replace'}, ['2e812e81']),
('鿅', {'style': TONE2}, ['\u9fc5']),
('鿅', {'style': TONE2, 'errors': 'ignore'}, []),
('鿅', {'style': TONE2, 'errors': 'replace'}, ['9fc5']),
('鿅', {'style': TONE2, 'errors': lambda x: ['a']}, ['a']),
)
for han in hans:
assert lazy_pinyin(han[0], **han[1]) == han[2]
def test_errors_callable():
def foobar(chars):
return 'a' * len(chars)
class Foobar(object):
def __call__(self, chars):
return 'a' * len(chars)
n = 5
assert lazy_pinyin('あ' * n, errors=foobar) == ['a' * n]
assert lazy_pinyin('あ' * n, errors=Foobar()) == ['a' * n]
def test_simple_seg():
data = {
'北京abcc': 'be3i ji1ng abcc',
'你好にほんごРусский язык': 'ni3 ha3o にほんごРусский язык',
}
for h, p in data.items():
assert slug([h], style=TONE2, separator=' ') == p
hans = '你好にほんごРусский язык'
ret = 'ni3 ha3o'
assert slug(hans, style=TONE2, separator=' ', errors=lambda x: None) == ret
data_for_update = [
    # Pronunciations of 便宜
[
['便宜'], {'style': TONE2}, ['pia2n', 'yi2']
],
[
['便宜从事'], {'style': TONE2}, ['bia4n', 'yi2', 'co2ng', 'shi4']
],
[
['便宜施行'], {'style': TONE2}, ['bia4n', 'yi2', 'shi1', 'xi2ng']
],
[
['便宜货'], {'style': TONE2}, ['pia2n', 'yi2', 'huo4']
],
[
['贪便宜'], {'style': TONE2}, ['ta1n', 'pia2n', 'yi2']
],
[
['讨便宜'], {'style': TONE2}, ['ta3o', 'pia2n', 'yi2']
],
[
['小便宜'], {'style': TONE2}, ['xia3o', 'pia2n', 'yi2']
],
[
['占便宜'], {'style': TONE2}, ['zha4n', 'pia2n', 'yi2']
],
#
[
        '\u3400', {'style': TONE2}, ['qiu1'],  # CJK Extension A: [3400-4DBF]
    ],
    [
        '\u4E00', {'style': TONE2}, ['yi1'],  # CJK Unified Ideographs (Basic): [4E00-9FFF]
    ],
    # [
    #     '\uFA29', {'style': TONE2}, ['da3o'],  # CJK Compatibility Ideographs: [F900-FAFF]
    # ],
    # yu was mistakenly put in the list of initials
['鱼', {'style': TONE2}, ['yu2']],
['鱼', {'style': FINALS}, ['v']],
['鱼', {'style': BOPOMOFO}, ['ㄩˊ']],
['鱼', {'style': CYRILLIC}, ['юй']],
['雨', {'style': TONE2}, ['yu3']],
['雨', {'style': FINALS}, ['v']],
['雨', {'style': BOPOMOFO}, ['ㄩˇ']],
['雨', {'style': CYRILLIC}, ['юй']],
['元', {'style': TONE2}, ['yua2n']],
['元', {'style': FINALS}, ['van']],
['元', {'style': BOPOMOFO}, ['ㄩㄢˊ']],
['元', {'style': CYRILLIC}, ['юань2']],
    # y and w are not initials either; the final of yu is v, of yi is i, and of wu is u
['呀', {'style': INITIALS}, ['']],
['呀', {'style': TONE2}, ['ya']],
['呀', {'style': FINALS}, ['ia']],
['呀', {'style': BOPOMOFO}, ['ㄧㄚ˙']],
['呀', {'style': CYRILLIC}, ['я']],
['无', {'style': INITIALS}, ['']],
['无', {'style': TONE2}, ['wu2']],
['无', {'style': FINALS}, ['u']],
['无', {'style': FINALS_TONE}, ['ú']],
['无', {'style': BOPOMOFO}, ['ㄨˊ']],
['无', {'style': CYRILLIC}, ['у2']],
['衣', {'style': TONE2}, ['yi1']],
['衣', {'style': FINALS}, ['i']],
['衣', {'style': BOPOMOFO}, ['ㄧ']],
['衣', {'style': CYRILLIC}, ['и1']],
['万', {'style': TONE2}, ['wa4n']],
['万', {'style': FINALS}, ['uan']],
['万', {'style': BOPOMOFO}, ['ㄨㄢˋ']],
['万', {'style': CYRILLIC}, ['вань4']],
    # the final of ju, qu, xu should be v
['具', {'style': FINALS_TONE}, ['ǜ']],
['具', {'style': FINALS_TONE2}, ['v4']],
['具', {'style': FINALS}, ['v']],
['具', {'style': BOPOMOFO}, ['ㄐㄩˋ']],
['具', {'style': CYRILLIC}, ['цзюй4']],
['取', {'style': FINALS_TONE}, ['ǚ']],
['取', {'style': FINALS_TONE2}, ['v3']],
['取', {'style': FINALS}, ['v']],
['取', {'style': BOPOMOFO}, ['ㄑㄩˇ']],
['取', {'style': CYRILLIC}, ['цюй3']],
['徐', {'style': FINALS_TONE}, ['ǘ']],
['徐', {'style': FINALS_TONE2}, ['v2']],
['徐', {'style': FINALS}, ['v']],
['徐', {'style': BOPOMOFO}, ['ㄒㄩˊ']],
['徐', {'style': CYRILLIC}, ['сюй2']],
# ń
['嗯', {'style': NORMAL}, ['n']],
['嗯', {'style': TONE}, ['ń']],
['嗯', {'style': TONE2}, ['n2']],
['嗯', {'style': INITIALS}, ['']],
['嗯', {'style': FIRST_LETTER}, ['n']],
['嗯', {'style': FINALS}, ['n']],
['嗯', {'style': FINALS_TONE}, ['ń']],
['嗯', {'style': FINALS_TONE2}, ['n2']],
['嗯', {'style': BOPOMOFO}, ['ㄣˊ']],
['嗯', {'style': CYRILLIC}, ['н2']],
# ḿ \u1e3f U+1E3F
['呣', {'style': NORMAL}, ['m']],
['呣', {'style': TONE}, ['ḿ']],
['呣', {'style': TONE2}, ['m2']],
['呣', {'style': INITIALS}, ['']],
['呣', {'style': FIRST_LETTER}, ['m']],
['呣', {'style': FINALS}, ['m']],
['呣', {'style': FINALS_TONE}, ['ḿ']],
['呣', {'style': FINALS_TONE2}, ['m2']],
['呣', {'style': BOPOMOFO}, ['ㄇㄨˊ']],
['呣', {'style': CYRILLIC}, ['м2']],
# 41
['彷徨', {}, ['pang', 'huang']],
['彷徨', {'style': CYRILLIC}, ['пан2', 'хуан2']],
    # Bopomofo
['打量', {'style': BOPOMOFO}, ['ㄉㄚˇ', 'ㄌㄧㄤ˙']],
['黄山b股', {'style': BOPOMOFO}, ['ㄏㄨㄤˊ', 'ㄕㄢ', 'b', 'ㄍㄨˇ']],
['打量', {'style': CYRILLIC}, ['да3', 'лян']],
['黄山b股', {'style': CYRILLIC}, ['хуан2', 'шань1', 'b', 'гу3']],
# 50
['打量', {'style': TONE2}, ['da3', 'liang']],
['打量', {'style': TONE3}, ['da3', 'liang']],
['侵略', {'style': TONE2}, ['qi1n', 'lve4']],
['侵略', {'style': TONE3}, ['qin1', 'lve4']],
['侵略', {'style': FINALS_TONE2}, ['i1n', 've4']],
['侵略', {'style': FINALS_TONE3}, ['in1', 've4']],
['侵略', {'style': BOPOMOFO}, ['ㄑㄧㄣ', 'ㄌㄩㄝˋ']],
['侵略', {'style': CYRILLIC}, ['цинь1', 'люэ4']],
['〇', {'style': TONE}, ['líng']],
    # Secondary word segmentation
[['你要', '重新考虑OK'], {'style': TONE}, [
'nǐ', 'yào', 'chóng', 'xīn', 'kǎo', 'lǜ', 'OK']],
]
@pytest.mark.parametrize('hans, kwargs, result', data_for_update)
def test_update(hans, kwargs, result):
assert lazy_pinyin(hans, **kwargs) == result
@pytest.mark.skipif(not SUPPORT_UCS4, reason='dont support ucs4')
@pytest.mark.parametrize(
'han, result', [
        ['\U00020000', ['he']],  # CJK Extension B: [20000-2A6DF]
        ['\U0002A79D', ['duo']],  # CJK Extension C: [2A700-2B73F]
        # ['\U0002B740', ['wu']],  # CJK Extension D: [2B740-2B81D]
        # ['\U0002F80A', ['seng']],  # CJK Compatibility Supplement: [2F800-2FA1F]
]
)
def test_support_ucs4(han, result):
assert lazy_pinyin(han) == result
@pytest.mark.skipif(SUPPORT_UCS4, reason='support ucs4')
@pytest.mark.parametrize(
'han', [
        '\U00020000',  # CJK Extension B: [20000-2A6DF]
        '\U0002A79D',  # CJK Extension C: [2A700-2B73F]
        # '\U0002B740',  # CJK Extension D: [2B740-2B81D]
        # '\U0002F80A',  # CJK Compatibility Supplement: [2F800-2FA1F]
]
)
def test_dont_support_ucs4(han):
assert pinyin(han) == [[han]]
def test_36():
hans = '两年前七斤喝醉了酒'
pys = ['liang', 'nian', 'qian', 'qi', 'jin', 'he', 'zui', 'le', 'jiu']
assert lazy_pinyin(hans) == pys
def test_with_unknown_style():
assert lazy_pinyin('中国') == ['zhong', 'guo']
assert lazy_pinyin('中国', style='unknown') == ['zhōng', 'guó']
assert pinyin('中国') == [['zhōng'], ['guó']]
assert pinyin('中国', style='unknown') == [['zhōng'], ['guó']]
if __name__ == '__main__':
import pytest
pytest.cmdline.main()
``` |
{
"source": "2besweet/Covid-19-project",
"score": 3
} |
#### File: 2besweet/Covid-19-project/estimator.py
```python
import math


def estimator(data):
    return data


class Estimator:
    def __init__(self, reportedCases, name, days, totalHospitalbeds,
                 avgDailyIncomeInUsd, avgDailyIncomePopulation):
        self.reportedCases = reportedCases
        self.name = name
        self.days = days
        self.totalHospitalbeds = totalHospitalbeds
        self.avgDailyIncomeInUsd = avgDailyIncomeInUsd
        self.avgDailyIncomePopulation = avgDailyIncomePopulation

    def covid19Estimator(self):
        myinputs = {
            "region": {
                "name": self.name,
                "avgAge": 19.7,
                "avgDailyIncomeInUSD": self.avgDailyIncomeInUsd,
                "avgDailyIncomePopulation": self.avgDailyIncomePopulation
            },
            "periodType": self.days,
            "timeToElapse": 58,
            "reportedCases": self.reportedCases,
            "population": 66622705,
            "totalHospitalBeds": self.totalHospitalbeds}
        currentlyInfected = self.reportedCases * 10
        currentlyInfectedSevere = self.reportedCases * 50
        factor = self.days / 3
        factorRounded = math.trunc(factor)
        InfectionsByRequestedTime = currentlyInfected * (2 ** factorRounded)
        InfectionsByRequestedTimeSevere = currentlyInfectedSevere * (2 ** factorRounded)
        ImpactSevereCasesByRequestedTime = InfectionsByRequestedTime * 15 / 100
        SevereCasesByRequestedTime = InfectionsByRequestedTimeSevere * 15 / 100
        hospitalBedsByRequestedTime1 = self.totalHospitalbeds * 35 / 95
        hospitalBedsByRequestedTimeAtFullCapacity1 = self.totalHospitalbeds * 35 / 100
        hospitalBedsByRequestedTime = math.trunc(hospitalBedsByRequestedTime1)
        hospitalBedsByRequestedTimeAtFullCapacity = math.trunc(hospitalBedsByRequestedTimeAtFullCapacity1)
        casesForICUByRequestedTime = InfectionsByRequestedTime * 5 / 100
        casesForICUByRequestedTimeSevere = InfectionsByRequestedTimeSevere * 5 / 100
        casesForVentilatorsByRequestedTime = InfectionsByRequestedTime * 2 / 100
        casesForVentilatorsByRequestedTimeSevere = InfectionsByRequestedTimeSevere * 2 / 100
        dollarsInFlight = InfectionsByRequestedTime * 0.65 * 1.5 * 30
        dollarsInFlightSevere = InfectionsByRequestedTimeSevere * self.avgDailyIncomePopulation * self.avgDailyIncomeInUsd * 30
        myoutputs = {
            'data': {'inputData': myinputs},
            'impact': {
                'currentlyInfected': currentlyInfected,
                'InfectionsByRequestedTime': InfectionsByRequestedTime,
                'SevereCasesByRequestedTime': ImpactSevereCasesByRequestedTime,
                'HospitalBedsByRequestedTime': hospitalBedsByRequestedTime,
                'hospitalBedsByRequestedTimeFullCapacity': hospitalBedsByRequestedTimeAtFullCapacity,
                'casesForICUByRequestedTime': casesForICUByRequestedTime,
                'casesForVentilatorsByRequestedTime': casesForVentilatorsByRequestedTime,
                'dollarsInFlight': dollarsInFlight,
            },
            'severeImpact': {
                "currentlyInfected": currentlyInfectedSevere,
                "InfectionsByRequestedTime": InfectionsByRequestedTimeSevere,
                "SevereCasesByRequestedTime": SevereCasesByRequestedTime,
                'HospitalBedsByRequestedTime': hospitalBedsByRequestedTime,
                'hospitalBedsByRequestedTimeFullCapacity': hospitalBedsByRequestedTimeAtFullCapacity,
                'casesForICUByRequestedTime': casesForICUByRequestedTimeSevere,
                "casesForVentilatorsByRequestedTime": casesForVentilatorsByRequestedTimeSevere,
                'dollarsInFlight': dollarsInFlightSevere
            }
        }
        print(myoutputs)


day = Estimator(674, "Africa", 28, 1380614, 1.5, 0.65)
day.covid19Estimator()

# Optional interactive entry; the hard-coded defaults below take precedence:
# reportedCases = eval(input('Enter the number of reported cases:-'))
# name = input('Enter the name of the region:-')
# days = eval(input('Enter the number of days:-'))
# totalHospitalbeds = eval(input('Enter the total number of beds available in the region:'))
# avgDailyIncomeInUsd = eval(input('Enter the Average income:-'))
# avgDailyIncomePopulation = eval(input('Enter the average daily income of the population:-')) / 100
reportedCases = 674
name = "Africa"
days = 28
totalHospitalbeds = 1380614
avgDailyIncomeInUsd = 1.5
avgDailyIncomePopulation = 0.65
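
# Worked example of the doubling arithmetic used in covid19Estimator() (sketch):
# with 674 reported cases and a 28-day window, currentlyInfected = 674 * 10 = 6740,
# factor = trunc(28 / 3) = 9, so InfectionsByRequestedTime = 6740 * 2**9 = 3450880.
assert 674 * 10 * (2 ** (28 // 3)) == 3450880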
``` |
{
"source": "2bithacker/peering-manager",
"score": 2
} |
#### File: devices/api/views.py
```python
from rest_framework.routers import APIRootView
from devices.filters import PlatformFilterSet
from devices.models import Platform
from peering_manager.api.views import ModelViewSet
from .serializers import PlatformSerializer
class DevicesRootView(APIRootView):
def get_view_name(self):
return "Devices"
class PlatformViewSet(ModelViewSet):
queryset = Platform.objects.all()
serializer_class = PlatformSerializer
filterset_class = PlatformFilterSet
```
#### File: devices/tests/test_api.py
```python
from unittest.mock import patch
from django.urls import reverse
from rest_framework import status
from devices.models import Platform
from utils.testing import APITestCase, StandardAPITestCases
class AppTest(APITestCase):
def test_root(self):
response = self.client.get(reverse("devices-api:api-root"), **self.header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class PlatformTest(StandardAPITestCases.View):
model = Platform
brief_fields = ["id", "url", "display", "name", "slug"]
create_data = [
{"name": "Test OS", "slug": "test-os"},
{"name": "Bugs OS", "slug": "bugsos", "description": "Nice try one..."},
]
bulk_update_data = {"description": "Favourite vendor"}
@classmethod
def setUpTestData(cls):
Platform.objects.create(name="No Bugs OS", slug="nobugsos")
```
#### File: extras/tests/test_views.py
```python
from unittest.mock import patch
from funcy.funcs import identity
from extras.models import IXAPI
from utils.testing import ViewTestCases
class IXAPITestCase(ViewTestCases.PrimaryObjectViewTestCase):
model = IXAPI
test_bulk_edit_objects = None
@classmethod
def setUpTestData(cls):
IXAPI.objects.bulk_create(
[
IXAPI(
name="IXP 1",
url="https://ixp1-ixapi.example.net/v1/",
api_key="key-ixp1",
api_secret="secret-ixp1",
identity="1234",
),
IXAPI(
name="IXP 2",
url="https://ixp2-ixapi.example.net/v2/",
api_key="key-ixp2",
api_secret="secret-ixp2",
identity="1234",
),
IXAPI(
name="IXP 3",
url="https://ixp3-ixapi.example.net/v3/",
api_key="key-ixp3",
api_secret="secret-ixp3",
identity="1234",
),
]
)
cls.form_data = {
"name": "IXP 4",
"url": "https://ixp4-ixapi.example.net/v1/",
"api_key": "key-ixp4",
"api_secret": "secret-ixp4",
"identity": "1234",
}
def test_get_object_anonymous(self):
with patch(
"extras.models.ix_api.IXAPI.get_customers",
return_value=[
{"id": "1234", "name": "Customer 1"},
{"id": "5678", "name": "Customer 2"},
],
):
super().test_get_object_anonymous()
def test_get_object_with_permission(self):
with patch(
"extras.models.ix_api.IXAPI.get_customers",
return_value=[
{"id": "1234", "name": "Customer 1"},
{"id": "5678", "name": "<NAME>"},
],
):
super().test_get_object_with_permission()
def test_edit_object_with_permission(self):
with patch(
"extras.models.ix_api.IXAPI.get_customers",
return_value=[
{"id": "1234", "name": "<NAME>"},
{"id": "5678", "name": "<NAME>"},
],
):
super().test_edit_object_with_permission()
```
#### File: utils/testing/functions.py
```python
import json
import logging
import re
from contextlib import contextmanager
@contextmanager
def disable_warnings(logger_name):
"""
Suppresses expected warning messages to keep the test output clean.
"""
logger = logging.getLogger(logger_name)
current_level = logger.level
logger.setLevel(logging.ERROR)
yield
logger.setLevel(current_level)
def extract_form_failures(html):
"""
Given raw HTML content from an HTTP response, returns a list of form errors.
"""
FORM_ERROR_REGEX = r"<!-- FORM-ERROR (.*) -->"
return re.findall(FORM_ERROR_REGEX, str(html))
def json_file_to_python_type(filename):
with open(filename, mode="r") as f:
return json.load(f)
def post_data(data):
"""
Takes a dictionary of test data and returns a dict suitable for POSTing.
"""
r = {}
for key, value in data.items():
if value is None:
r[key] = ""
elif type(value) in (list, tuple):
if value and hasattr(value[0], "pk"):
# Value is a list of instances
r[key] = [v.pk for v in value]
else:
r[key] = value
elif hasattr(value, "pk"):
# Value is an instance
r[key] = value.pk
else:
r[key] = str(value)
return r
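

if __name__ == "__main__":
    # Hedged usage sketch (not part of the module): post_data() converts None
    # to an empty string, model-like objects to their pk, and everything else
    # to a string, ready to be POSTed by the test client.
    class _FakeInstance:
        pk = 42
    print(post_data({"name": "IXP 1", "platform": _FakeInstance(), "comments": None}))
    # -> {'name': 'IXP 1', 'platform': 42, 'comments': ''}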
``` |
{
"source": "2bitoperations/Adafruit_Blinka_Displayio",
"score": 2
} |
#### File: Adafruit_Blinka_Displayio/displayio/bitmap.py
```python
from recordclass import recordclass
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_Blinka_displayio.git"
Rectangle = recordclass("Rectangle", "x1 y1 x2 y2")
class Bitmap:
"""Stores values of a certain size in a 2D array"""
def __init__(self, width, height, value_count):
"""Create a Bitmap object with the given fixed size. Each pixel stores a value that is
used to index into a corresponding palette. This enables differently colored sprites to
share the underlying Bitmap. value_count is used to minimize the memory used to store
the Bitmap.
"""
self._width = width
self._height = height
self._read_only = False
        if value_count <= 0:
raise ValueError("value_count must be > 0")
bits = 1
while (value_count - 1) >> bits:
if bits < 8:
bits = bits << 1
else:
bits += 8
self._bits_per_value = bits
if (
self._bits_per_value > 8
and self._bits_per_value != 16
and self._bits_per_value != 32
):
raise NotImplementedError("Invalid bits per value")
self._data = (width * height) * [0]
self._dirty_area = Rectangle(0, 0, width, height)
def __getitem__(self, index):
"""
Returns the value at the given index. The index can either be
an x,y tuple or an int equal to `y * width + x`.
"""
if isinstance(index, (tuple, list)):
index = (index[1] * self._width) + index[0]
if index >= len(self._data):
raise ValueError("Index {} is out of range".format(index))
return self._data[index]
def __setitem__(self, index, value):
"""
Sets the value at the given index. The index can either be
an x,y tuple or an int equal to `y * width + x`.
"""
if self._read_only:
raise RuntimeError("Read-only object")
if isinstance(index, (tuple, list)):
x = index[0]
y = index[1]
index = y * self._width + x
elif isinstance(index, int):
x = index % self._width
y = index // self._width
self._data[index] = value
if self._dirty_area.x1 == self._dirty_area.x2:
self._dirty_area.x1 = x
self._dirty_area.x2 = x + 1
self._dirty_area.y1 = y
self._dirty_area.y2 = y + 1
else:
if x < self._dirty_area.x1:
self._dirty_area.x1 = x
elif x >= self._dirty_area.x2:
self._dirty_area.x2 = x + 1
if y < self._dirty_area.y1:
self._dirty_area.y1 = y
elif y >= self._dirty_area.y2:
self._dirty_area.y2 = y + 1
def _finish_refresh(self):
self._dirty_area.x1 = 0
self._dirty_area.x2 = 0
def fill(self, value):
"""Fills the bitmap with the supplied palette index value."""
self._data = (self._width * self._height) * [value]
self._dirty_area = Rectangle(0, 0, self._width, self._height)
@property
def width(self):
"""Width of the bitmap. (read only)"""
return self._width
@property
def height(self):
"""Height of the bitmap. (read only)"""
return self._height
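

if __name__ == "__main__":
    # Hedged usage sketch (not part of the module): a 4x4 bitmap with two
    # possible palette values stores one bit per pixel and accepts either
    # (x, y) tuples or flat ``y * width + x`` indices.
    bmp = Bitmap(4, 4, 2)
    bmp[1, 2] = 1          # (x, y) indexing
    bmp[3 * 4 + 0] = 1     # flat index for (x=0, y=3)
    print(bmp[1, 2], bmp[0, 3], bmp.width, bmp.height)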
``` |
{
"source": "2bitoperations/raspi-pumpcontrol",
"score": 3
} |
#### File: raspi-pumpcontrol/raspipump/pump.py
```python
import collections
import datetime
import logging
import math
import time
# some constants to help define the states we could be in
OFF = 0
ON = 1
# can't talk to the cistern
COMM_ERROR = -1
# a pipe may be broken, the pump may be broken
FAULT = -2
DRIVER_SYSFS = "sysfs"
DRIVER_GPIOZERO = "gpiozero"
class Pump:
def __init__(self,
cistern,
initialstate_reporter,
pump_pin,
active_high,
max_run_time_minutes,
cooldown_minutes,
sleep_between_readings_seconds,
desired_level,
level_must_move_in_seconds,
level_change_threshold,
driver):
self.state = OFF
if driver == "gpiozero":
from gpiozero import LED
self.pump = LED(pump_pin, active_high=active_high)
elif driver == "sysfs":
from raspipump.sysfsled import SysFSLed
self.pump = SysFSLed(pin=pump_pin, active_high=active_high)
self.pump.off()
self.cistern = cistern
self.initialstate = initialstate_reporter
self.pump_off_time = datetime.datetime.utcfromtimestamp(0)
self.pump_on_time = datetime.datetime.utcfromtimestamp(0)
self.active_high = active_high
self.max_run_time_seconds = max_run_time_minutes * 60
self.cooldown_seconds = cooldown_minutes * 60
self.sleep_between_readings_seconds = sleep_between_readings_seconds
self.desired_level = desired_level
self.level_must_move_in_seconds = level_must_move_in_seconds
self.level_change_threshold = level_change_threshold
def _pump_off(self):
# if the pump isn't already off, record off time.
if self.pump.is_lit:
self.pump_off_time = datetime.datetime.now()
self.pump.off()
def _pump_on(self, level_at_pump_on):
if not self.pump.is_lit:
self.pump_on_history = collections.deque([], maxlen=math.ceil(
self.level_must_move_in_seconds / self.sleep_between_readings_seconds))
self.pump_on_time = datetime.datetime.now()
self.level_at_pump_on = level_at_pump_on
else:
self.pump_on_history.append({"time": datetime.datetime.now(),
"level": float(level_at_pump_on)})
self.pump.on()
def run(self):
try:
while True:
logging.debug(
"starting loop, state is {state}, pump_off_time {pump_off_time}, pump_on_time {pump_on_time}, pump on? {pump_on}"
.format(state=self.state,
pump_off_time=self.pump_off_time,
pump_on_time=self.pump_on_time,
pump_on=self.pump.is_lit))
self.initialstate.report_state(self.state)
# if we're in a FAULT state, we're stuck here. :(:(:(:(
if self.state is FAULT:
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
else:
# have we exceeded our max allowed runtime? then turn pump off
if self.state is ON and not self.max_runtime_allows_running():
logging.info("max allowed runtime exceeded, pump off.")
self.state = OFF
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
# get a fresh reading from the cistern
reading = self.cistern.get_reading()
reading_valid = self.cistern.is_reading_valid(reading,
max_timedelta_seconds=self.sleep_between_readings_seconds * 2)
if not reading_valid:
# reading not valid, report comm error and turn pump off
logging.warning("unable to get reading. pump off.")
self.state = COMM_ERROR
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
elif reading_valid and (float(reading["level"]) >= float(self.desired_level)):
# reading is valid but current level >= desired level, turn pump off and sleep
logging.debug("not running pump, level is {level} desired is {desired}"
.format(level=reading["level"], desired=self.desired_level))
self.state = OFF
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
elif reading_valid and (float(reading["level"]) < float(self.desired_level)):
# valid reading, ideally we want to run the pump. check our cooldown time and pipe break.
if (self.state is not ON and self.cooldown_allows_running()) or \
(self.state is ON and self.pipe_break_detect_allows_running(
) and self.max_runtime_allows_running()):
# sweet, we can run.
logging.info("running pump, level is {level} desired is {desired}"
.format(level=reading["level"], desired=self.desired_level))
self.state = ON
self._pump_on(level_at_pump_on=float(reading["level"]))
time.sleep(self.sleep_between_readings_seconds)
#put back to sleep while pump is running - will automatically check reading
continue
                        elif (self.state is OFF or self.state is COMM_ERROR) and not self.cooldown_allows_running():
logging.info("not pump, level is {level} desired is {desired}, within cooldown period"
.format(level=reading["level"], desired=self.desired_level))
self.state = OFF
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
elif self.state is ON and not self.max_runtime_allows_running():
logging.info("not pump, level is {level} desired is {desired}, exceeded max runtime"
.format(level=reading["level"], desired=self.desired_level))
self.state = OFF
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
elif self.state is ON and not self.pipe_break_detect_allows_running(
):
logging.warning("fault! level is {level} desired is {desired}, pipe break fault suspected"
.format(level=reading["level"], desired=self.desired_level))
self.state = FAULT
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
else:
logging.warning(
"fault! level is {level} desired is {desired} state is {state}, unsupported state condition!"
.format(level=reading["level"],
desired=self.desired_level,
state=self.state))
self.state = FAULT
self._pump_off()
time.sleep(self.sleep_between_readings_seconds)
continue
finally:
logging.info("exiting, about to turn pump off")
self.initialstate.report_state(OFF)
self._pump_off()
logging.info("exiting, pump is off, exiting.")
def pipe_break_detect_allows_running(self):
total_time_running_secs = abs(datetime.datetime.now().timestamp() - self.pump_on_time.timestamp())
logging.debug(f"total running time {total_time_running_secs} history {self.pump_on_history}")
if total_time_running_secs < self.level_must_move_in_seconds:
return True
else:
running_value_change = []
for i in range(1, len(self.pump_on_history)):
running_value_change.append(
abs(self.pump_on_history[i]["level"] - self.pump_on_history[i - 1]["level"]))
total_value_change = sum(running_value_change)
logging.debug(
f"total running time {total_time_running_secs} history {self.pump_on_history} "
f"total change {total_value_change} threshold {self.level_change_threshold}")
return total_value_change > float(self.level_change_threshold)
def max_runtime_allows_running(self):
total_time_running_secs = abs(datetime.datetime.now().timestamp() - self.pump_on_time.timestamp())
logging.debug(f"total running time {total_time_running_secs} max allowed time {self.max_run_time_seconds}")
return float(total_time_running_secs) < float(self.max_run_time_seconds)
def cooldown_allows_running(self):
total_time_in_cooldown_secs = abs(datetime.datetime.now().timestamp() - self.pump_off_time.timestamp())
logging.debug("total time in cooldown {total_time_in_cooldown_secs} cooldown time {cooldown_time}"
.format(total_time_in_cooldown_secs=total_time_in_cooldown_secs,
cooldown_time=self.cooldown_seconds))
return float(total_time_in_cooldown_secs) > float(self.cooldown_seconds)
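

# Worked example of the history sizing used in _pump_on() (sketch): with a
# reading every 5 seconds and a requirement that the level move within 60
# seconds, the deque keeps ceil(60 / 5) = 12 of the most recent readings.
assert math.ceil(60 / 5) == 12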
``` |
{
"source": "2black0/quadcopter-control",
"score": 4
} |
#### File: 2black0/quadcopter-control/simple-pendulum.py
```python
import numpy as np
from time import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.integrate import solve_ivp
# setup plots
fig = plt.figure()
ax1 = fig.add_subplot(121, aspect='equal', xlim = (-1, 1), ylim = (-1.5, 0.5), title = "Pendulum Animation")
ax2 = fig.add_subplot(122, xlim = (-2*np.pi, 2*np.pi), ylim = (-15, 15), title = "Phase Space Plot")
ax2.set_xlabel(r"$\Theta$[rad]")
ax2.set_ylabel(r"$\dot{\Theta}$[rad/s]")
ax1.grid()
ax2.grid()
line, = ax1.plot([], [], 'o-', lw=1) # pendulum arm
point, = ax2.plot([],[], 'ro') # position in phase space
# pendulum parameters
theta_0 = [np.pi/8, 0.0] # theta_0[1] = initial angular velocity
g = 9.81
L = 1.0
m = 1.0
I = m*L**2/3 # moment of inertia for a rod pendulum
omega = np.sqrt((m*g*L)/(2*I))
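# Worked example (sketch): for this rod pendulum omega = sqrt(m*g*L/(2*I))
# = sqrt(3*g/(2*L)) ~ 3.84 rad/s, so the small-angle period is
# T = 2*pi/omega ~ 1.64 s.
assert np.isclose(2 * np.pi / omega, 1.638, atol=0.01)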
# animation parameters
origin = [0.0, 0.0]
dt = 0.05
frames = 600
t_span = [0.0, frames * dt]
def Hamiltonian(q, p):
H = p**2 / (6*m*L**2) + m*g*L/2*(1-np.cos(q))
return H
def eqn(t, theta_0):
# f = [theta, theta_dot]
# returns f'
return [theta_0[1], -omega**2 * np.sin(theta_0[0])]
ts = np.linspace(t_span[0], t_span[1], frames)
pendulum_state = solve_ivp(eqn, t_span, theta_0, t_eval = ts)
# phase space data points
# (optional) this code snippet could be refactored in terms of pendulum_state.y[][]
x = np.linspace(-2*np.pi, 2*np.pi, frames)
y = np.linspace(-15, 15, frames)
ThetaGrid, Theta_dotGrid = np.meshgrid(x, y)
q = ThetaGrid # generalised coordinate
p = m * L**2 * Theta_dotGrid # generalised momentum
cs = ax2.contour(ThetaGrid, Theta_dotGrid, Hamiltonian(q,p))
def animate(i):
theta = pendulum_state.y[0][i]
theta_dot = pendulum_state.y[1][i]
x = [origin[0], L * np.sin(theta)]
y = [origin[1], -L * np.cos(theta)]
line.set_data(x, y)
point.set_data(theta, theta_dot)
return line, point,
t0 = time()
animate(0) #sample time required to evaluate function
t1 = time()
interval = 1000 * dt - (t1 - t0)
ani = animation.FuncAnimation(fig, animate, frames = frames, interval = interval)
plt.show()
``` |
{
"source": "2BlackCoffees/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: 2BlackCoffees/CarND-Behavioral-Cloning-P3/model.py
```python
from math import ceil
from keras.models import Model
import matplotlib.pyplot as plt
import csv
import cv2
import numpy as np
import os
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers import Cropping2D, Flatten, Dropout, Dense, Lambda, Conv2D
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
import random
from os import path
data_path = 'data3'
print ("Analyzing data from directory %s" % data_path)
def plot_model(model, train_generator, train_samples, validation_generator, validation_samples, nbepochs):
history_object = model.fit_generator(train_generator, validation_data =
validation_generator,
nb_val_samples = len(validation_samples),
nb_epoch=nbepochs, verbose=1)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
def generator(samples, batch_size=32):
num_samples = len(samples)
base_path = './%s/' % data_path
correction_factor = [0.25, 0, -0.25] # Read http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
while 1: # Loop forever so the generator never terminates
samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
for line in batch_samples:
for i in range(3):
source_path = line[i]
file_name = source_path.split('\\')[-1]
current_path = base_path + file_name
image = cv2.imread(current_path)
#image[:,:,0] = cv2.resize(image.squeeze(), (320,160))
measurement = float(line[3]) + correction_factor[i]
images.append(image)
measurements.append(measurement)
if np.random.uniform()>0.5:
image_flipped = np.fliplr(image)
measurement_flipped = -measurement
images.append(image_flipped)
measurements.append(measurement_flipped)
if np.random.uniform()>0.5:
pix2angle = -0.05 #Opposed direction
latShift = random.randint(-5,5)
M = np.float32([[1,0,latShift],[0,1,0]])
img_translated = cv2.warpAffine(image,M,(image.shape[1],image.shape[0]))
images.append(img_translated)
measurements.append(measurement)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(measurements)
yield sklearn.utils.shuffle(X_train, y_train)
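
# Worked example of the steering correction applied above (sketch): for a CSV
# row whose centre steering angle is 0.1, the three generated labels are
# 0.1 + 0.25 (left camera), 0.1 (centre) and 0.1 - 0.25 (right camera).
assert [round(0.1 + c, 2) for c in [0.25, 0, -0.25]] == [0.35, 0.1, -0.15]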
# Set this to True only in google Colab
colab = False
nbepoch = 3
batch_size=32
ch, row, col = 3, 160, 320 # Trimmed image format
# compile and train the model using the generator function
samples = []
with open('./' + data_path + '/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
latest_model_name = "model.h5"
if path.exists(latest_model_name):
print("Opening existing model %s" % latest_model_name)
model = load_model(latest_model_name)
else:
print("Creating a new model")
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = (row, col, ch)))
    model.add(Cropping2D(cropping = ((60,25), (0, 0)))) # Crop 60 rows from the top and 25 from the bottom; keep all columns.
model.add(Conv2D(filters=24,kernel_size=(5,5),activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(filters=36,kernel_size=(5,5),activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(filters=48,kernel_size=(5,5),activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(filters=64,kernel_size=(3,3),activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),activation="relu"))
model.add(Flatten())
if colab:
# Google colab handles this additional parameters quite smoothly
model.add(Dropout(0.5))
model.add(Dense(1164))
model.add(Dropout(0.5))
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
# NVidia network: https://classroom.udacity.com/nanodegrees/nd013/parts/168c60f1-cc92-450a-a91b-e427c326e6a7/modules/6b6c37bc-13a5-47c7-88ed-eb1fce9789a0/lessons/3fc8dd70-23b3-4f49-86eb-a8707f71f8dd/concepts/7f68e171-cf87-40d2-adeb-61ae99fe56f5
#plot_model(model, train_generator, train_samples, validation_generator, validation_samples, nbepoch)
if colab:
num_samples = len(samples)
base_path = './data/'
correction_factor = [0.25, 0, -0.25] # Read http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
samples = shuffle(samples)
for epoch in range(nbepoch):
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
for line in batch_samples:
for i in range(3):
source_path = line[i]
file_name = source_path.split('\\')[-1]
current_path = base_path + file_name
image = cv2.imread(current_path)
#image[:,:,0] = cv2.resize(image.squeeze(), (320,160))
measurement = float(line[3]) + correction_factor[i]
images.append(image)
measurements.append(measurement)
if np.random.uniform()>0.3:
image_flipped = np.fliplr(image)
measurement_flipped = -measurement
images.append(image_flipped)
measurements.append(measurement_flipped)
if np.random.uniform()>0.3:
pix2angle = -0.05 #Opposed direction
latShift = random.randint(-5,5)
M = np.float32([[1,0,latShift],[0,1,0]])
img_translated = cv2.warpAffine(image,M,(image.shape[1],image.shape[0]))
images.append(img_translated)
measurements.append(measurement)
X = np.array(images)
y = np.array(measurements)
X, y = sklearn.utils.shuffle(X, y)
print("Running offset %d (out of %d, batch_size: %d) epoch: %d (out of %d) " % (offset, num_samples, batch_size, epoch, nbepoch))
model.fit(x=X, y=y, batch_size=None, epochs=1, verbose=1, callbacks=None,
validation_split=0.2, validation_data=None, shuffle=True, class_weight=None,
sample_weight=None, initial_epoch=0, steps_per_epoch=None,
validation_steps=None, validation_batch_size=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=True)
print("Saving model\n")
model.save('model-epoch%d.h5' % epoch)
model.save('model.h5')
else:
from workspace_utils import active_session
with active_session():
model.fit_generator(train_generator, steps_per_epoch=ceil(len(train_samples)/batch_size), validation_data=validation_generator, validation_steps=ceil(len(validation_samples)/batch_size), epochs=nbepoch, verbose=1)
model.save('model.h5')
``` |
{
"source": "2BlackCoffees/checkovCustomPolicy",
"score": 2
} |
#### File: checkovCustomPolicy/checkcov-orgpolicies/check_gcp_org_policies.py
```python
from lark import Token
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckResult, CheckCategories
import pprint
import yaml
import os
import re
from pathlib import Path
import glob
pp = pprint.PrettyPrinter(indent=4)
class CheckGCPOrgPolicies(BaseResourceCheck):
def __init__(self):
name = "Ensure Org policies are not changed"
id = "CKV_GCP_999"
supported_resources = [resource_name for resource_name in os.environ['RESOURCES_TO_CHECK'].split(',')]
test = [3]
categories = [ CheckCategories.IAM ]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
@staticmethod
def read_yml():
yml_file_name=os.environ['YML_INPUT_ORG_POLICIES']
yml_file = Path(yml_file_name)
if not yml_file.is_file():
print("ERROR_POLICY: File %s could not be found." % yml_file)
return None
with open(yml_file_name, 'r') as stream:
yml_policies = yaml.safe_load(stream)
return yml_policies
def get_current_file_name(self):
file_name = self.entity_path.split(':')[0]
return re.sub(r"^\/*", "", file_name)
@staticmethod
def check_all_files_exist(yml_policies):
existing_file_list_in_dir = glob.glob("*")
file_list_policies = [policies['file_name'] for policies in yml_policies]
for file_name in file_list_policies:
if file_name not in existing_file_list_in_dir:
print("ERROR_POLICY: File name %s could not be found !" % file_name)
print("ERROR_POLICY: Existing file list in directory: %s" % ", ".join(existing_file_list_in_dir))
print("ERROR_POLICY: Expected file list from yml file: %s" % ", ".join(file_list_policies))
return False
else:
print("INFO_POLICY: OK file %s exists." % file_name)
return True
def get_policy_for_current_file(self, yml_policies):
current_tf_file_name = self.get_current_file_name()
for yml_node in yml_policies:
if current_tf_file_name == yml_node['file_name']:
return yml_node
return None
def append_output_yml(self, conf):
yml_file_name=os.environ['YML_OUTPUT_ORG_POLICIES']
yml_file = Path(yml_file_name)
yml_content = None
if not yml_file.is_file():
yml_content = {'org_policies': []}
print("DEBUG_POLICY: Creating new data structure to file %s." % yml_file_name)
else:
with open(yml_file_name, 'r') as stream:
yml_content = yaml.safe_load(stream)
print("DEBUG_POLICY: Read existing data from %s " % yml_file_name)
file_policies = self.get_policy_for_current_file(yml_content['org_policies'])
if file_policies is None:
file_policies = {'file_name': self.get_current_file_name()}
yml_content['org_policies'].append(file_policies)
print("DEBUG_POLICY: File %s not found in data structure. Initializing data structure as follows: %s" % (self.get_current_file_name(), pp.pformat(file_policies)))
node_name_exists = True
index = 0
base_node_name = "--".join(conf['constraint'])
while node_name_exists:
node_name_exists = False
new_node_name = '%s_%d' % (base_node_name, index)
for existing_node_name in file_policies.keys():
if existing_node_name == new_node_name:
node_name_exists = True
index = index + 1
break
print("DEBUG_POLICY: Using unique name %s for the new node." % new_node_name)
file_policies[new_node_name] = conf
#pp.pprint(yml_content)
with open(yml_file_name, 'w') as stream:
yaml.dump(yml_content, stream)
def scan_resource_conf(self, conf):
self.append_output_yml(conf)
yml_policies = CheckGCPOrgPolicies.read_yml()
if yml_policies is None:
return CheckResult.FAILED
print("DEBUG_POLICY: Reference policy is %s." % pp.pformat(yml_policies))
yml_policies = yml_policies['org_policies']
if not CheckGCPOrgPolicies.check_all_files_exist(yml_policies):
return CheckResult.FAILED
current_policies = self.get_policy_for_current_file(yml_policies)
for policy in current_policies.values():
if policy == conf:
print("INFO_POLICY: Policy %s found in file %s" % ("--".join(conf['constraint']), self.get_current_file_name()))
return CheckResult.PASSED
print("ERROR_POLICY: Policy could not be found in file %s!" % self.get_current_file_name())
print("ERROR_POLICY: List of described policies: %s." % pp.pformat(current_policies))
print("ERROR_POLICY: List of terraform policies: %s." % pp.pformat(conf))
return CheckResult.FAILED
scanner = CheckGCPOrgPolicies()
```
#### File: 2BlackCoffees/checkovCustomPolicy/diffyml.py
```python
from deepdiff import DeepDiff
import yaml
import os
import sys
from pathlib import Path
def read_yml_file(yml_file_name):
yml_file = Path(yml_file_name)
if not yml_file.is_file():
print("ERROR: File %s could not be found." % yml_file)
return None
with open(yml_file_name, 'r') as stream:
yml_policies = yaml.safe_load(stream)
return yml_policies
def print_usage():
print ('Usage: %s yml_file_1 yml_file_2' % sys.argv[0])
def get_parameters():
if len(sys.argv) != 3:
print_usage()
exit(1)
return sys.argv[1:]
if __name__ == '__main__':
filename_in_yml_policies, filename_out_yml_policies = get_parameters()
in_yml_policies = read_yml_file(filename_in_yml_policies)
out_yml_policies = read_yml_file(filename_out_yml_policies)
ddiff = DeepDiff(in_yml_policies, out_yml_policies, ignore_order=True)
if len(ddiff) > 0:
print('ERROR: Policies differ: %s' % ddiff, file=sys.stderr)
exit(1)
print('OK Described policy and Terraform ones are the same.')
exit(0)
``` |
{
"source": "2bndy5/breathe",
"score": 2
} |
#### File: breathe/directives/setup.py
```python
from breathe.directives.class_like import (
DoxygenStructDirective,
DoxygenClassDirective,
DoxygenInterfaceDirective,
)
from breathe.directives.content_block import (
DoxygenNamespaceDirective,
DoxygenGroupDirective,
DoxygenPageDirective,
)
from breathe.directives.file import DoxygenFileDirective, AutoDoxygenFileDirective
from breathe.directives.function import DoxygenFunctionDirective
from breathe.directives.index import DoxygenIndexDirective, AutoDoxygenIndexDirective
from breathe.directives.item import (
DoxygenVariableDirective,
DoxygenDefineDirective,
DoxygenUnionDirective,
DoxygenConceptDirective,
DoxygenEnumDirective,
DoxygenEnumValueDirective,
DoxygenTypedefDirective,
)
from breathe.parser import DoxygenParserFactory
from breathe.project import ProjectInfoFactory
from breathe.process import AutoDoxygenProcessHandle
from sphinx.application import Sphinx
import os
import subprocess
def setup(app: Sphinx) -> None:
directives = {
"doxygenindex": DoxygenIndexDirective,
"autodoxygenindex": AutoDoxygenIndexDirective,
"doxygenfunction": DoxygenFunctionDirective,
"doxygenstruct": DoxygenStructDirective,
"doxygenclass": DoxygenClassDirective,
"doxygeninterface": DoxygenInterfaceDirective,
"doxygenvariable": DoxygenVariableDirective,
"doxygendefine": DoxygenDefineDirective,
"doxygenconcept": DoxygenConceptDirective,
"doxygenenum": DoxygenEnumDirective,
"doxygenenumvalue": DoxygenEnumValueDirective,
"doxygentypedef": DoxygenTypedefDirective,
"doxygenunion": DoxygenUnionDirective,
"doxygennamespace": DoxygenNamespaceDirective,
"doxygengroup": DoxygenGroupDirective,
"doxygenfile": DoxygenFileDirective,
"autodoxygenfile": AutoDoxygenFileDirective,
"doxygenpage": DoxygenPageDirective,
}
# The directives need these global objects, so in order to smuggle
# them in, we use env.temp_data. But it is cleared after each document
# has been read, we use the source-read event to set them.
# note: the parser factory contains a cache of the parsed XML
# note: the project_info_factory also contains some caching stuff
# TODO: is that actually safe for when reading in parallel?
project_info_factory = ProjectInfoFactory(app)
parser_factory = DoxygenParserFactory(app)
def set_temp_data(
app: Sphinx, project_info_factory=project_info_factory, parser_factory=parser_factory
):
assert app.env is not None
app.env.temp_data["breathe_project_info_factory"] = project_info_factory
app.env.temp_data["breathe_parser_factory"] = parser_factory
app.connect("source-read", lambda app, docname, source: set_temp_data(app))
for name, directive in directives.items():
app.add_directive(name, directive)
app.add_config_value("breathe_projects", {}, True) # Dict[str, str]
app.add_config_value("breathe_default_project", "", True) # str
# Provide reasonable defaults for domain_by_extension mapping. Can be overridden by users.
app.add_config_value(
"breathe_domain_by_extension", {"py": "py", "cs": "cs"}, True
) # Dict[str, str]
app.add_config_value("breathe_domain_by_file_pattern", {}, True) # Dict[str, str]
app.add_config_value("breathe_projects_source", {}, True)
app.add_config_value("breathe_build_directory", "", True)
app.add_config_value("breathe_default_members", (), True)
app.add_config_value("breathe_show_define_initializer", False, "env")
app.add_config_value("breathe_show_enumvalue_initializer", False, "env")
app.add_config_value("breathe_show_include", True, "env")
app.add_config_value("breathe_implementation_filename_extensions", [".c", ".cc", ".cpp"], True)
app.add_config_value("breathe_doxygen_config_options", {}, True)
app.add_config_value("breathe_doxygen_aliases", {}, True)
app.add_config_value("breathe_use_project_refids", False, "env")
app.add_config_value("breathe_order_parameters_first", False, "env")
app.add_config_value("breathe_separate_member_pages", False, "env")
breathe_css = "breathe.css"
if os.path.exists(os.path.join(app.confdir, "_static", breathe_css)):
app.add_css_file(breathe_css)
def write_file(directory, filename, content):
# Check the directory exists
if not os.path.exists(directory):
os.makedirs(directory)
# Write the file with the provided contents
with open(os.path.join(directory, filename), "w") as f:
f.write(content)
doxygen_handle = AutoDoxygenProcessHandle(
subprocess.check_call, write_file, project_info_factory
)
def doxygen_hook(app: Sphinx):
doxygen_handle.generate_xml(
app.config.breathe_projects_source,
app.config.breathe_doxygen_config_options,
app.config.breathe_doxygen_aliases,
)
app.connect("builder-inited", doxygen_hook)
```
#### File: breathe/finder/compound.py
```python
from breathe.finder import ItemFinder, stack
from breathe.renderer.filter import Filter
class DoxygenTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_: Filter, matches) -> None:
"""Find nodes which match the filter. Doesn't test this node, only its children"""
node_stack = stack(self.data_object, ancestors)
compound_finder = self.item_finder_factory.create_finder(self.data_object.compounddef)
compound_finder.filter_(node_stack, filter_, matches)
class CompoundDefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_: Filter, matches) -> None:
"""Finds nodes which match the filter and continues checks to children"""
node_stack = stack(self.data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
for sectiondef in self.data_object.sectiondef:
finder = self.item_finder_factory.create_finder(sectiondef)
finder.filter_(node_stack, filter_, matches)
for innerclass in self.data_object.innerclass:
finder = self.item_finder_factory.create_finder(innerclass)
finder.filter_(node_stack, filter_, matches)
class SectionDefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_: Filter, matches) -> None:
"""Find nodes which match the filter. Doesn't test this node, only its children"""
node_stack = stack(self.data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
for memberdef in self.data_object.memberdef:
finder = self.item_finder_factory.create_finder(memberdef)
finder.filter_(node_stack, filter_, matches)
class MemberDefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_: Filter, matches) -> None:
data_object = self.data_object
node_stack = stack(data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
if data_object.kind == "enum":
for value in data_object.enumvalue:
value_stack = stack(value, node_stack)
if filter_.allow(value_stack):
matches.append(value_stack)
class RefTypeSubItemFinder(ItemFinder):
def filter_(self, ancestors, filter_: Filter, matches) -> None:
node_stack = stack(self.data_object, ancestors)
if filter_.allow(node_stack):
matches.append(node_stack)
``` |
{
"source": "2bndy5/check-python-sources",
"score": 2
} |
#### File: check-python-sources/python_linter/__init__.py
```python
import io
import os
import logging
FOUND_RICH_LIB = False
try:
from rich.logging import RichHandler
FOUND_RICH_LIB = True
logging.basicConfig(
format="%(name)s: %(message)s",
handlers=[RichHandler(show_time=False)],
)
except ImportError:
logging.basicConfig()
#: The logging.Logger object used for outputting data.
logger = logging.getLogger("Python Checker")
if not FOUND_RICH_LIB:
logger.debug("rich module not found")
# setup a separate logger for using github log commands
log_commander = logger.getChild("LOG COMMANDER") # create a child of our logger obj
log_commander.setLevel(logging.DEBUG) # be sure that log commands are output
console_handler = logging.StreamHandler() # Create special stdout stream handler
console_handler.setFormatter(logging.Formatter("%(message)s")) # no formatted log cmds
log_commander.addHandler(console_handler) # Use special handler for log_commander
log_commander.propagate = False # prevent duplicate messages in the parent logger obj
class Globals:
"""Global variables for re-use (non-constant)."""
#: The responding payload containing info about changed files.
FILES = []
#: The parsed JSON of the event payload.
EVENT_PAYLOAD = {}
#: A shared response object for `requests` module.
response_buffer = None
class GlobalParser:
"""Global variables specific to output parsers. Each element in each of the
following attributes represents a clang-tool's output for 1 source file.
"""
#: This can only be a `list` of JSON-type `dict` (generated by pylint)
pylint_notes = []
#: This can only be a `list` of type ??? (not implemented yet)
black_advice = []
def start_log_group(name: str) -> None:
"""Begin a collapsable group of log statements.
Args:
name: The name of the collapsable group
"""
log_commander.fatal("::group::%s", name)
def end_log_group() -> None:
"""End a collapsable group of log statements."""
log_commander.fatal("::endgroup::")
def get_lines_from_file(file_path: str) -> list:
"""Get all the lines from a file as a list of strings.
:param str file_path: The path to the file.
:Returns: A list of lines (each a `str`).
"""
with open(file_path, encoding="utf-8") as temp:
return temp.readlines()
def get_line_cnt_from_cols(file_path: str, offset: int) -> tuple:
"""Gets a line count and columns offset from a file's absolute offset.
:param str file_path: Path to file.
:param int offset: The byte offset to translate
Returns:
A `tuple` of 2 `int` numbers:
- Index 0 is the line number for the given offset.
- Index 1 is the column number for the given offset on the line.
"""
line_cnt = 1
last_lf_pos = 0
cols = 1
file_path = file_path.replace("/", os.sep)
with io.open(file_path, "r", encoding="utf-8", newline="\n") as src_file:
src_file.seek(0, io.SEEK_END)
max_len = src_file.tell()
src_file.seek(0, io.SEEK_SET)
while src_file.tell() != offset and src_file.tell() < max_len:
char = src_file.read(1)
if char == "\n":
line_cnt += 1
last_lf_pos = src_file.tell() - 1 # -1 because LF is part of offset
if last_lf_pos + 1 > max_len:
src_file.newlines = "\r\n"
src_file.seek(0, io.SEEK_SET)
line_cnt = 1
cols = src_file.tell() - last_lf_pos
return (line_cnt, cols)
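# --- Usage sketch (illustrative; the file name and offset are assumptions) ---
# Translate an absolute byte offset reported by a tool into a (line, column) pair:
#
#     line, col = get_line_cnt_from_cols("example.py", 42)
#     logger.info("offset 42 is at line %d, column %d", line, col)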
```
#### File: check-python-sources/tests/basic_test.py
```python
from typing import Union
def func_with_very_long_name(_a, lot: int, of_args: list[str], with_, some: bool, types) -> None:
for arg in of_args:
if some or lot == len(of_args):
return with_
return types
``` |
{
"source": "2bndy5/CircuitPython_2_Micropython",
"score": 2
} |
#### File: CircuitPython_2_Micropython/circuitpython2micropython/ubus_device.py
```python
from machine import Pin, I2C
class SPIDevice:
"""
Represents a single SPI device and manages initialization/deinitialization
(pseudo-locking) the bus and the device's CS (Chip Select) pin.
:param ~machine.SPI spi: The SPI bus that the device is on.
:param ~machine.Pin chip_select: The chip select pin object used as a digital output.
"""
def __init__(self, spi, *, chip_select=None,
baudrate=100000, polarity=0, phase=0):
self.spi = spi
self.spi.deinit()
self.baudrate = baudrate
self.polarity = polarity
self.phase = phase
self.chip_select = chip_select
if self.chip_select:
self.chip_select.switch_to_output(value=True)
@property
def frequency(self):
return self.baudrate
def __enter__(self):
self.spi.init(
baudrate=self.baudrate,
polarity=self.polarity,
phase=self.phase)
if self.chip_select:
self.chip_select.value = False
return self.spi
def __exit__(self, *exc):
if self.chip_select:
self.chip_select.value = True
self.spi.deinit()
return False
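# --- Usage sketch (illustrative; assumes ``from machine import SPI`` and a chip
# select pin object providing ``switch_to_output()`` and a writable ``value``,
# as the class above expects) ---
#
#     spi_device = SPIDevice(SPI(0), chip_select=cs_pin)
#     with spi_device as spi:
#         spi.write(b"\x0f")  # CS is asserted on entry and released on exit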
class I2CDevice:
"""Represents a single I2C device and manages initialization/deinitialization
(pseudo-locking) the bus and the device's slave address.
:param ~machine.I2C i2c: The I2C bus that the device is on.
:param int address: The I2C device's address. This is a 7-bit integer.
:param bool probe: if `True`, instantiation probes the I2C bus for a device
with a designated slave address that matches the above ``address`` parameter.
"""
def __init__(self, i2c, address, probe=True, scl=None, sda=None, frequency=None):
self.i2c = i2c
self.sda, self.scl = (sda, scl)
self.freq = frequency
self.device_address = address
if probe:
if not self.__probe_for_device():
raise ValueError("No I2C device at address: %x" % self.device_address)
def __probe_for_device(self):
for addr in self.i2c.scan():
if addr == self.device_address:
return True
return False
def readinto(self, buf, *, start=0, end=None, stop=True):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buffer: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include; if None, use ``len(buf)``
:param bool stop: `True` sends a STOP condition (special bit); `False` doesn't.
"""
if end is None:
end = len(buf)
self.i2c.readfrom_into(self.device_address, buf[start:end], stop=stop)
def write(self, buf, *, start=0, end=None, stop=True):
"""
Write the bytes from ``buffer`` to the device. Transmits a stop bit if
``stop`` is set.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buffer[start:end]``. This will not cause an allocation like
``buffer[start:end]`` will so it saves memory.
:param bytearray buffer: buffer containing the bytes to write
:param int start: Index to start writing from
:param int end: Index to read up to but not include; if None, use ``len(buf)``
:param bool stop: If true, output an I2C stop condition after the buffer is written
"""
if end is None:
end = len(buf)
self.i2c.writeto(self.device_address, buf[start:end], stop=stop)
# pylint: disable-msg=too-many-arguments
def write_then_readinto(self, out_buffer, in_buffer, *,
out_start=0, out_end=None, in_start=0, in_end=None):
"""
Write the bytes from ``out_buffer`` to the device, then immediately
reads into ``in_buffer`` from the device. The number of bytes read
will be the length of ``in_buffer``.
If ``out_start`` or ``out_end`` is provided, then the output buffer
will be sliced as if ``out_buffer[out_start:out_end]``. This will
not cause an allocation like ``buffer[out_start:out_end]`` will so
it saves memory.
If ``in_start`` or ``in_end`` is provided, then the input buffer
will be sliced as if ``in_buffer[in_start:in_end]``. This will not
cause an allocation like ``in_buffer[in_start:in_end]`` will so
it saves memory.
:param bytearray out_buffer: buffer containing the bytes to write
:param bytearray in_buffer: buffer containing the bytes to read into
:param int out_start: Index to start writing from
:param int out_end: Index to read up to but not include; if None, use ``len(out_buffer)``
:param int in_start: Index to start writing at
:param int in_end: Index to write up to but not include; if None, use ``len(in_buffer)``
"""
if out_end is None:
out_end = len(out_buffer)
if in_end is None:
in_end = len(in_buffer)
self.write(out_buffer, start=out_start, end=out_end, stop=False)
self.readinto(in_buffer, start=in_start, end=in_end)
#pylint: enable-msg=too-many-arguments
def __enter__(self):
# in micropython we need the `Pin` objects used for sda & scl parameters to I2C.init()
if self.scl is not None and self.sda is not None:
if self.freq is not None:
self.i2c = I2C(scl=self.scl, sda=self.sda, frequency=self.freq)
else:
self.i2c = I2C(scl=self.scl, sda=self.sda)
return self
def __exit__(self, *exc):
return False
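# --- Usage sketch (illustrative; the device address and register value are assumptions) ---
#
#     i2c_device = I2CDevice(I2C(0), 0x68)   # probes the bus for 0x68 by default
#     out_buf = bytearray([0x75])            # hypothetical register address
#     in_buf = bytearray(1)
#     i2c_device.write_then_readinto(out_buf, in_buf)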
``` |
{
"source": "2bndy5/CircuitPython-Cirque-Pinnacle",
"score": 3
} |
#### File: CircuitPython-Cirque-Pinnacle/examples/cirque_pinnacle_anymeas_test.py
```python
import time
import struct
import board
from digitalio import DigitalInOut
import circuitpython_cirque_pinnacle.glidepoint as Pinnacle
dr_pin = DigitalInOut(board.D2)
# NOTE The dr_pin is a required keyword argument to the
# constructor when using AnyMeas mode
# if using a trackpad configured for SPI
spi = board.SPI()
ss_pin = DigitalInOut(board.D7)
tpad = Pinnacle.PinnacleTouchSPI(spi, ss_pin, dr_pin=dr_pin)
# if using a trackpad configured for I2C
# i2c = board.I2C()
# tpad = Pinnacle.PinnacleTouchI2C(i2c, dr_pin=dr_pin)
# NOTE if dr_pin was not specified upon instantiation,
# the following command will raise an AttributeError exception
tpad.data_mode = Pinnacle.ANYMEAS
# setup toggle and polarity bits for measuring with PNP gate muxing
class MeasVector:
"""A blueprint matrix used to manipulate the measurements' vector"""
def __init__(self, toggle, polarity):
self.toggle = toggle
self.polarity = polarity
vectors = []
# This toggles Y0 only and toggles it positively
vectors.append(MeasVector(0x00010000, 0x00010000))
# This toggles Y0 only and toggles it negatively
vectors.append(MeasVector(0x00010000, 0x00000000))
# This toggles X0 only and toggles it positively
vectors.append(MeasVector(0x00000001, 0x00000000))
# This toggles X16 only and toggles it positively
vectors.append(MeasVector(0x00008000, 0x00000000))
# This toggles Y0-Y7 negative and X0-X7 positive
vectors.append(MeasVector(0x00FF00FF, 0x000000FF))
idle_vectors = [0] * len(vectors)
def compensate(count=5):
"""take ``count`` measurements, then average them together """
for i, vector in enumerate(vectors):
idle_vectors[i] = 0
for _ in range(count):
result = struct.unpack(
"h",
tpad.measure_adc(vector.toggle, vector.polarity)
)[0]
idle_vectors[i] += result
idle_vectors[i] /= count
print("compensation {}: {}".format(i, idle_vectors[i]))
def take_measurements(timeout=10):
"""read ``len(vectors)`` number of measurements and print results for
``timeout`` number of seconds."""
start = time.monotonic()
while time.monotonic() - start < timeout:
for i, vector in enumerate(vectors):
result = struct.unpack(
"h",
tpad.measure_adc(vector.toggle, vector.polarity)
)[0]
print("vector{}: {}".format(i, result - idle_vectors[i]), end="\t")
print()
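# A typical run of this example (not shown in the original script) would first
# average the idle readings, then stream compensated measurements:
#
#     compensate()
#     take_measurements()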
``` |
{
"source": "2bndy5/roboclaw",
"score": 3
} |
#### File: roboclaw/roboclaw/usart_serial_ctx.py
```python
MICROPY = False
try:
from busio import UART
except ImportError: # running on a MicroPython board
from machine import UART
MICROPY = True
class SerialUART(UART):
"""A wrapper class for MicroPython's machine.UART class to utilize python's context
manager. This wrapper may be incomplete as it is specialized for use with this library
only as a drop-in replacement for CircuitPython's `busio.UART` or PySerial's
`~serial.Serial` module API.
:param ~microcontroller.Pin tx_pin: The pin used for sending data.
:param ~microcontroller.Pin rx_pin: The pin used for receiving data.
:param int baudrate: The baudrate of the Serial port. Defaults to ``9600``.
:param int bits: The number of bits per byte. Options are limited to ``8`` or ``9``.
Defaults to ``8``.
:param int parity: This parameter is optional. The parity bit provides simple
per-byte error checking. Options are
limited to ``None``, ``0`` (even), or ``1`` (odd). Defaults to ``None``.
:param int stop: The number of stop bits to be used to signify the end of the buffer
payload (kinda like the null character in a C-style string). Options are limited to
``1`` or ``2``. Defaults to ``1``.
"""
def __init__(self, tx_pin=None, rx_pin=None, baudrate=9600, bits=8, parity=None, stop=1):
if MICROPY:
super(SerialUART, self).__init__(
tx=tx_pin, rx=rx_pin, baudrate=baudrate, bits=bits, parity=parity, stop=stop
)
else:
super(SerialUART, self).__init__(
tx_pin, rx_pin, baudrate=baudrate, bits=bits, parity=parity, stop=stop
)
def __enter__(self):
"""Used to reinitialize serial port with the correct configuration ("enter"
``with`` block)"""
if MICROPY:
self.init(
baudrate=self.baudrate,
bits=self.bits,
parity=self.parity,
stop=self.stop,
tx=self.tx_pin,
rx=self.rx_pin)
return self
return super().__enter__()
# pylint: disable=arguments-differ
def __exit__(self, *exc):
"""Deinitialize the serial port ("exit" ``with`` block)"""
if MICROPY:
self.deinit()
return False
return super().__exit__(*exc)
def in_waiting(self):
"""The number of bytes waiting to be read on the open Serial port."""
return self.any()
def close(self):
""" deinitialize the port """
self.deinit()
def read_until(self, size=None):
"""return a `bytearray` of received data.
:param int size: If left unspecified, returns everything in the buffer terminated
with a ``\n`` or internal timeout occurs. If specified, then returns everything the
buffer up to at most the ``size`` number of bytes or internal timeout occurs"""
if size is None:
return self.readline()
return self.read(size)
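# --- Usage sketch (illustrative; the tx/rx pin objects are assumptions) ---
# The context manager re-initializes the port on entry and deinitializes it on
# exit when running under MicroPython:
#
#     with SerialUART(tx_pin=tx, rx_pin=rx, baudrate=38400) as port:
#         packet = port.read_until()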
``` |
{
"source": "2bndy5/rst2pdf",
"score": 2
} |
#### File: rst2pdf/rst2pdf/pdfbuilder.py
```python
from copy import copy
from io import BytesIO
import logging
import os
import os.path
import re
import sys
import time
from urllib.parse import urlunparse
from docutils import writers
from docutils import nodes
from docutils.transforms.parts import Contents
from docutils.io import FileOutput
import docutils.core
import jinja2
from pygments.lexers import guess_lexer
import sphinx
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.locale import _
from sphinx.transforms import SphinxTransform
from sphinx.util.console import darkgreen, red
from sphinx.util import SEP
import rst2pdf
from rst2pdf import createpdf
from rst2pdf.directives import code_block
from rst2pdf.log import log
from rst2pdf.languages import get_language_available
if sphinx.__version__ >= '2.1':
from sphinx.errors import NoUri
else:
from sphinx.environment import NoUri
class PDFBuilder(Builder):
name = 'pdf'
out_suffix = '.pdf'
def init(self):
self.docnames = []
self.document_data = []
self.sphinx_logger = sphinx.util.logging.getLogger(__name__)
def write(self, *ignored):
self.init_document_data()
if self.config.pdf_verbosity > 1:
log.setLevel(logging.DEBUG)
elif self.config.pdf_verbosity > 0:
log.setLevel(logging.INFO)
for entry in self.document_data:
try:
docname, targetname, title, author = entry[:4]
# Custom options per document
if len(entry) > 4 and isinstance(entry[4], dict):
opts = entry[4]
else:
opts = {}
self.sphinx_logger.info("processing " + targetname + "... ")
self.opts = opts
class dummy:
extensions = self.config.pdf_extensions
createpdf.add_extensions(dummy())
self.page_template = opts.get(
'pdf_page_template', self.config.pdf_page_template
)
docwriter = PDFWriter(
self,
stylesheets=opts.get(
'pdf_stylesheets', self.config.pdf_stylesheets
),
language=opts.get('pdf_language', self.config.pdf_language),
breaklevel=opts.get('pdf_break_level', self.config.pdf_break_level),
breakside=opts.get('pdf_breakside', self.config.pdf_breakside),
fontpath=opts.get('pdf_font_path', self.config.pdf_font_path),
fitmode=opts.get('pdf_fit_mode', self.config.pdf_fit_mode),
compressed=opts.get('pdf_compressed', self.config.pdf_compressed),
inline_footnotes=opts.get(
'pdf_inline_footnotes', self.config.pdf_inline_footnotes
),
splittables=opts.get(
'pdf_splittables', self.config.pdf_splittables
),
repeat_table_rows=opts.get(
'pdf_repeat_table_rows', self.config.pdf_repeat_table_rows
),
default_dpi=opts.get(
'pdf_default_dpi', self.config.pdf_default_dpi
),
page_template=self.page_template,
invariant=opts.get('pdf_invariant', self.config.pdf_invariant),
real_footnotes=opts.get(
'pdf_real_footnotes', self.config.pdf_real_footnotes
),
use_toc=opts.get('pdf_use_toc', self.config.pdf_use_toc),
toc_depth=opts.get('pdf_toc_depth', self.config.pdf_toc_depth),
use_coverpage=opts.get(
'pdf_use_coverpage', self.config.pdf_use_coverpage
),
use_numbered_links=opts.get(
'pdf_use_numbered_links', self.config.pdf_use_numbered_links
),
fit_background_mode=opts.get(
'pdf_fit_background_mode', self.config.pdf_fit_background_mode
),
baseurl=opts.get('pdf_baseurl', self.config.pdf_baseurl),
section_header_depth=opts.get(
'section_header_depth', self.config.section_header_depth
),
srcdir=self.srcdir,
style_path=opts.get('pdf_style_path', self.config.pdf_style_path),
config=self.config,
)
tgt_file = os.path.join(self.outdir, targetname + self.out_suffix)
destination = FileOutput(
destination=open(tgt_file, 'wb'), encoding='utf-8'
)
doctree = self.assemble_doctree(
docname,
title,
author,
appendices=opts.get('pdf_appendices', self.config.pdf_appendices)
or [],
)
doctree.settings.author = author
doctree.settings.title = title
self.sphinx_logger.info("done")
self.sphinx_logger.info("writing " + targetname + "... ")
docwriter.write(doctree, destination)
self.sphinx_logger.info("done")
except Exception:
log.exception('Failed to build doc')
self.sphinx_logger.info(red("FAILED"))
def init_document_data(self):
preliminary_document_data = map(list, self.config.pdf_documents)
if not preliminary_document_data:
self.warn(
'no "pdf_documents" config value found; no documents ' 'will be written'
)
return
# assign subdirs to titles
self.titles = []
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
self.warn(
'"pdf_documents" config value references unknown '
'document %s' % docname
)
continue
self.document_data.append(entry)
if docname.endswith(SEP + 'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
def assemble_doctree(self, docname, title, author, appendices):
# FIXME: use the new inline_all_trees from Sphinx.
# check how the LaTeX builder does it.
self.docnames = set([docname])
self.sphinx_logger.info(darkgreen(docname) + " ")
def process_tree(docname, tree):
tree = tree.deepcopy()
for toctreenode in tree.traverse(addnodes.toctree):
newnodes = []
includefiles = map(str, toctreenode['includefiles'])
for includefile in includefiles:
try:
self.sphinx_logger.info(darkgreen(includefile) + " ")
subtree = process_tree(
includefile, self.env.get_doctree(includefile)
)
self.docnames.add(includefile)
except Exception:
self.warn(
'%s: toctree contains ref to nonexisting file %r'
% (docname, includefile)
)
else:
sof = addnodes.start_of_file(docname=includefile)
sof.children = subtree.children
newnodes.append(sof)
toctreenode.parent.replace(toctreenode, newnodes)
return tree
tree = self.env.get_doctree(docname)
tree = process_tree(docname, tree)
self.docutils_languages = {}
if self.config.language:
self.docutils_languages[self.config.language] = get_language_available(
self.config.language
)[2]
if self.opts.get('pdf_use_index', self.config.pdf_use_index):
# Add index at the end of the document
# This is a hack. create_index creates an index from
# ALL the documents data, not just this one.
# So, we preserve a copy, use just what we need, then
# restore it.
t = copy(self.env.indexentries)
try:
self.env.indexentries = {
docname: self.env.indexentries[docname + '-gen']
}
except KeyError:
self.env.indexentries = {}
for dname in self.docnames:
self.env.indexentries[dname] = t.get(dname, [])
genindex = IndexEntries(self.env).create_index(self)
self.env.indexentries = t
# EOH (End Of Hack)
if genindex: # No point in creating empty indexes
index_nodes = genindex_nodes(genindex)
tree.append(nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
tree.append(index_nodes)
# This is stolen from the HTML builder's prepare_writing function
self.domain_indices = []
# html_domain_indices can be False/True or a list of index names
indices_config = self.config.pdf_domain_indices
if indices_config and hasattr(self.env, 'domains'):
for domain in self.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
# deprecated config value
if indexname == 'py-modindex' and not self.config.pdf_use_modindex:
continue
content, collapse = indexcls(domain).generate()
if content:
self.domain_indices.append(
(indexname, indexcls, content, collapse)
)
# self.domain_indices contains a list of indices to generate, like
# this:
# [('py-modindex',
# <class 'sphinx.domains.python.PythonModuleIndex'>,
# [(u'p', [[u'parrot', 0, 'test', u'module-parrot', 'Unix, Windows',
# '', 'Analyze and reanimate dead parrots.']])], True)]
# Now this in the HTML builder is passed onto write_domain_indices.
# We handle it right here
for indexname, indexcls, content, collapse in self.domain_indices:
# In HTML this is handled with a Jinja template, domainindex.html
# We have to generate docutils stuff right here in the same way.
self.sphinx_logger.info(' ' + indexname)
output = ['DUMMY', '=====', '', '.. _modindex:\n\n']
t = indexcls.localname
t += '\n' + '=' * len(t) + '\n'
output.append(t)
for letter, entries in content:
output.append('.. cssclass:: heading4\n\n%s\n\n' % letter)
for (
name,
grouptype,
page,
anchor,
extra,
qualifier,
description,
) in entries:
if qualifier:
q = '[%s]' % qualifier
else:
q = ''
if extra:
e = '(%s)' % extra
else:
e = ''
output.append('`%s <#%s>`_ %s %s' % (name, anchor, e, q))
output.append(' %s' % description)
output.append('')
dt = docutils.core.publish_doctree('\n'.join(output))[1:]
dt.insert(0, nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
tree.extend(dt)
if appendices:
tree.append(
nodes.raw(text='OddPageBreak %s' % self.page_template, format='pdf')
)
self.sphinx_logger.info('')
self.sphinx_logger.info('adding appendixes...')
for docname in appendices:
self.sphinx_logger.info(darkgreen(docname) + " ")
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
tree.append(appendix)
self.sphinx_logger.info('done')
# Replace Sphinx's HighlightLanguageTransform with our own for Sphinx versions >= 1.8.0 and < 2.0.0 as
# Sphinx's HighlightLanguageTransform breaks linenothreshold setting in the highlight directive (See issue #721)
# This code can be removed when we drop support for Python 2
if sphinx.__version__ > '1.7.9' and sphinx.__version__ < '2.0.0':
for i in range(len(self.env.app.registry.post_transforms)):
if (
self.env.app.registry.post_transforms[i].__name__
== 'HighlightLanguageTransform'
):
self.env.app.registry.post_transforms[
i
] = HighlightLanguageTransform
break
self.sphinx_logger.info("resolving references...")
self.env.resolve_references(tree, docname, self)
for pendingnode in tree.traverse(addnodes.pending_xref):
# This needs work, need to keep track of all targets
# so I don't replace and create hanging refs, which
# crash
if (
pendingnode.get('reftarget', None) == 'genindex'
and self.config.pdf_use_index
):
pendingnode.replace_self(
nodes.reference(
text=pendingnode.astext(), refuri=pendingnode['reftarget']
)
)
# FIXME: probably need to handle dangling links to domain-specific indexes
else:
# FIXME: This is from the LaTeX builder and I still don't understand it
# well, and doesn't seem to work
# resolve :ref:s to distant tex files -- we can't add a cross-reference,
# but append the document name
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
newnodes = [nodes.emphasis(sectname, sectname)]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
newnodes.append(nodes.emphasis(title, title))
newnodes.append(nodes.Text(')', ')'))
break
else:
pass
pendingnode.replace_self(newnodes)
# else:
# pass
return tree
def get_target_uri(self, docname, typ=None):
# print 'GTU',docname,typ
# FIXME: production lists are not supported yet!
if typ == 'token':
# token references are always inside production lists and must be
# replaced by \token{} in LaTeX
return '@token'
if docname not in self.docnames:
# It can be a 'main' document:
for doc in self.document_data:
if doc[0] == docname:
return "pdf:" + doc[1] + '.pdf'
# It can be in some other document's toctree
for indexname, toctree in self.env.toctree_includes.items():
if docname in toctree:
for doc in self.document_data:
if doc[0] == indexname:
return "pdf:" + doc[1] + '.pdf'
# No idea
raise NoUri
else: # Local link
return ""
def get_relative_uri(self, from_, to, typ=None):
# ignore source path
return self.get_target_uri(to, typ)
def get_outdated_docs(self):
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
continue
targetname = self.env.doc2path(docname, self.outdir, self.out_suffix)
try:
targetmtime = os.path.getmtime(targetname)
except Exception:
targetmtime = 0
try:
srcmtime = os.path.getmtime(self.env.doc2path(docname))
if srcmtime > targetmtime:
yield docname
except EnvironmentError:
# source doesn't exist anymore
pass
def genindex_nodes(genindexentries):
indexlabel = _('Index')
indexunder = '=' * len(indexlabel)
output = ['DUMMY', '=====', '.. _genindex:\n\n', indexlabel, indexunder, '']
for key, entries in genindexentries:
output.append('.. cssclass:: heading4\n\n%s\n\n' % key) # initial
for entryname, entryvalue in entries:
links, subitems = entryvalue[0:2]
if links:
output.append('`%s <#%s>`_' % (entryname, nodes.make_id(links[0][1])))
for i, link in enumerate(links[1:]):
output[-1] += ' `[%s] <#%s>`_ ' % (i + 1, nodes.make_id(link[1]))
output.append('')
else:
output.append(entryname)
if subitems:
for subentryname, subentrylinks in subitems:
if subentrylinks:
output.append(
' `%s <%s>`_' % (subentryname, subentrylinks[0])
)
for i, link in enumerate(subentrylinks[1:]):
output[-1] += ' `[%s] <%s>`_ ' % (i + 1, link)
output.append('')
else:
output.append(subentryname)
output.append('')
doctree = docutils.core.publish_doctree('\n'.join(output))
return doctree[1]
class PDFContents(Contents):
# Mostly copied from Docutils' Contents transformation
def build_contents(self, node, level=0):
level += 1
sections = []
# Replaced this with the for below to make it work for Sphinx
# trees.
# sections = [sect for sect in node if isinstance(sect, nodes.section)]
for sect in node:
if isinstance(sect, nodes.compound):
for sect2 in sect:
if isinstance(sect2, addnodes.start_of_file):
for sect3 in sect2:
if isinstance(sect3, nodes.section):
sections.append(sect3)
elif isinstance(sect, nodes.section):
sections.append(sect)
entries = []
# FIXME: depth should be taken from :maxdepth: (Issue 320)
depth = self.toc_depth
for section in sections:
title = section[0]
auto = title.get('auto') # May be set by SectNum.
entrytext = self.copy_and_filter(title)
reference = nodes.reference('', '', refid=section['ids'][0], *entrytext)
ref_id = self.document.set_id(reference)
entry = nodes.paragraph('', '', reference)
item = nodes.list_item('', entry)
if (
self.backlinks in ('entry', 'top')
and title.next_node(nodes.reference) is None
):
if self.backlinks == 'entry':
title['refid'] = ref_id
elif self.backlinks == 'top':
title['refid'] = self.toc_id
if level < depth:
subsects = self.build_contents(section, level)
item += subsects
entries.append(item)
if entries:
contents = nodes.bullet_list('', *entries)
if auto:
contents['classes'].append('auto-toc')
return contents
else:
return []
class PDFWriter(writers.Writer):
def __init__(
self,
builder,
stylesheets,
language,
breaklevel=0,
breakside='any',
fontpath=[],
fitmode='shrink',
compressed=False,
inline_footnotes=False,
splittables=True,
srcdir='.',
default_dpi=300,
page_template='decoratedPage',
invariant=False,
real_footnotes=False,
use_toc=True,
use_coverpage=True,
toc_depth=9999,
use_numbered_links=False,
fit_background_mode="scale",
section_header_depth=2,
baseurl=urlunparse(['file', os.getcwd() + os.sep, '', '', '', '']),
style_path=None,
repeat_table_rows=False,
config={},
):
writers.Writer.__init__(self)
self.builder = builder
self.output = ''
self.stylesheets = stylesheets
self.__language = language
self.breaklevel = int(breaklevel)
self.breakside = breakside
self.fontpath = fontpath
self.fitmode = fitmode
self.compressed = compressed
self.inline_footnotes = inline_footnotes
self.splittables = splittables
self.highlightlang = builder.config.highlight_language
self.srcdir = srcdir
self.config = config
self.default_dpi = default_dpi
self.page_template = page_template
self.invariant = invariant
self.real_footnotes = real_footnotes
self.use_toc = use_toc
self.use_coverpage = use_coverpage
self.toc_depth = toc_depth
self.use_numbered_links = use_numbered_links
self.fit_background_mode = fit_background_mode
self.section_header_depth = section_header_depth
self.repeat_table_rows = repeat_table_rows
self.baseurl = baseurl
if hasattr(sys, 'frozen'):
self.PATH = os.path.abspath(os.path.dirname(sys.executable))
else:
self.PATH = os.path.abspath(os.path.dirname(__file__))
if style_path:
self.style_path = style_path
else:
self.style_path = [self.srcdir]
supported = 'pdf'
config_section = 'pdf writer'
config_section_dependencies = ('writers',)
def translate(self):
visitor = PDFTranslator(self.document, self.builder)
self.document.walkabout(visitor)
lang = self.config.language or 'en'
langmod = get_language_available(lang)[2]
self.docutils_languages = {lang: langmod}
# Generate Contents topic manually
if self.use_toc:
contents = nodes.topic(classes=['contents'])
contents += nodes.title('')
contents[0] += nodes.Text(langmod.labels['contents'])
contents['ids'] = ['Contents']
pending = nodes.topic()
contents.append(pending)
pending.details = {}
self.document.insert(
0, nodes.raw(text='SetPageCounter 1 arabic', format='pdf')
)
self.document.insert(
0, nodes.raw(text='OddPageBreak %s' % self.page_template, format='pdf')
)
self.document.insert(0, contents)
self.document.insert(
0, nodes.raw(text='SetPageCounter 1 lowerroman', format='pdf')
)
contTrans = PDFContents(self.document)
contTrans.toc_depth = self.toc_depth
contTrans.startnode = pending
contTrans.apply()
if self.use_coverpage:
# Generate cover page
# FIXME: duplicate from createpdf, refactor!
# Add the Sphinx template paths
def add_template_path(path):
return os.path.join(self.srcdir, path)
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(
[
self.srcdir,
os.path.expanduser('~/.rst2pdf'),
os.path.join(self.PATH, 'templates'),
]
+ list(map(add_template_path, self.config.templates_path))
),
autoescape=jinja2.select_autoescape(['html', 'xml']),
)
try:
template = jinja_env.get_template(self.config.pdf_cover_template)
except jinja2.TemplateNotFound:
log.error(
"Can't find cover template %s, using default"
% self.config.pdf_cover_template
)
template = jinja_env.get_template('sphinxcover.tmpl')
# This is what's used in the python docs because
# Latex does a manual linebreak. This sucks.
authors = self.document.settings.author.split('\\')
# Honour the "today" config setting
if self.config.today:
date = self.config.today
else:
date = time.strftime(self.config.today_fmt or _('%B %d, %Y'))
# Feed data to the template, get restructured text.
cover_text = template.render(
title=self.document.settings.title or visitor.elements['title'],
subtitle='%s %s' % (_('version'), self.config.version),
authors=authors,
date=date,
)
cover_tree = docutils.core.publish_doctree(cover_text)
self.document.insert(0, cover_tree)
sio = BytesIO()
if self.invariant:
createpdf.patch_PDFDate()
createpdf.patch_digester()
createpdf.RstToPdf(
sphinx=True,
stylesheets=self.stylesheets,
language=self.__language,
breaklevel=self.breaklevel,
breakside=self.breakside,
fit_mode=self.fitmode,
font_path=self.fontpath,
inline_footnotes=self.inline_footnotes,
highlightlang=self.highlightlang,
splittables=self.splittables,
style_path=self.style_path,
repeat_table_rows=self.repeat_table_rows,
basedir=self.srcdir,
def_dpi=self.default_dpi,
real_footnotes=self.real_footnotes,
numbered_links=self.use_numbered_links,
background_fit_mode=self.fit_background_mode,
baseurl=self.baseurl,
section_header_depth=self.section_header_depth,
).createPdf(doctree=self.document, output=sio, compressed=self.compressed)
self.output = sio.getvalue()
def supports(self, format):
"""This writer supports all format-specific elements."""
return 1
class PDFTranslator(nodes.SparseNodeVisitor):
def __init__(self, document, builder):
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
self.footnotestack = []
self.curfilestack = []
self.highlightlinenothreshold = 999999
self.top_sectionlevel = 1
self.footnotecounter = 1
self.curfile = None
self.footnotedict = {}
self.this_is_the_title = True
self.in_title = 0
self.elements = {
'title': document.settings.title,
}
self.highlightlang = builder.config.highlight_language
def visit_document(self, node):
self.curfilestack.append(node.get('docname', ''))
self.footnotestack.append('')
def visit_start_of_file(self, node):
self.curfilestack.append(node['docname'])
self.footnotestack.append(node['docname'])
def depart_start_of_file(self, node):
self.footnotestack.pop()
self.curfilestack.pop()
def visit_highlightlang(self, node):
self.highlightlang = node['lang']
self.highlightlinenothreshold = node['linenothreshold']
raise nodes.SkipNode
def visit_versionmodified(self, node):
replacement = nodes.paragraph()
replacement.extend(node.children)
node.parent.replace(node, replacement)
def depart_versionmodified(self, node):
pass
def visit_literal_block(self, node):
if 'code' in node['classes']: # Probably a processed code-block
pass
else:
lang = lang_for_block(
node.astext(), node.get('language', self.highlightlang)
)
content = node.astext().splitlines()
if len(content) > self.highlightlinenothreshold or node.get(
'linenos', False
):
options = {'linenos': True}
else:
options = {}
# FIXME: make tab width configurable
content = [c.replace('\t', ' ') for c in content]
replacement = nodes.literal_block()
replacement.children = code_block.code_block_directive(
name=None,
arguments=[lang],
options=options,
content=content,
lineno=False,
content_offset=None,
block_text=None,
state=None,
state_machine=None,
)
node.parent.replace(node, replacement)
def visit_footnote(self, node):
node['backrefs'] = [
'%s_%s' % (self.footnotestack[-1], x) for x in node['backrefs']
]
node['ids'] = ['%s_%s' % (self.footnotestack[-1], x) for x in node['ids']]
node.children[0][0] = nodes.Text(str(self.footnotecounter))
for id in node['backrefs']:
try:
fnr = self.footnotedict[id]
except KeyError:
pass
else:
fnr.children[0] = nodes.Text(str(self.footnotecounter))
self.footnotedict[node['ids'][0]] = node
self.footnotecounter += 1
def visit_footnote_reference(self, node):
node['ids'] = ['%s_%s' % (self.footnotestack[-1], x) for x in node['ids']]
node['refid'] = '%s_%s' % (self.footnotestack[-1], node['refid'])
self.footnotedict[node['ids'][0]] = node
try:
footnote = self.footnotedict[node['refid']]
except KeyError:
pass
else:
node.children[0] = nodes.Text(footnote.children[0][0])
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
# This is for graphviz support
def visit_graphviz(self, node):
# Not neat, but I need to send self to my handlers
node['builder'] = self
def visit_Aanode(self, node):
pass
def depart_Aanode(self, node):
pass
def visit_productionlist(self, node):
replacement = nodes.literal_block(classes=["code"])
names = []
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
for production in node:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
n = nodes.strong()
n += nodes.Text(lastname)
replacement += n
replacement += nodes.Text(' ::= ')
else:
replacement += nodes.Text('%s ' % (' ' * len(lastname)))
production.walkabout(self)
replacement.children.extend(production.children)
replacement += nodes.Text('\n')
node.parent.replace(node, replacement)
raise nodes.SkipNode
def depart_productionlist(self, node):
pass
def visit_production(self, node):
pass
def depart_production(self, node):
pass
def visit_OddEvenNode(self, node):
pass
def depart_OddEvenNode(self, node):
pass
class HighlightLanguageTransform(SphinxTransform):
"""
This is a copy of Sphinx's HighlightLanguageTransform for use with Sphinx versions >= 1.8.0 and < 2.0.0
as the Sphinx version of this class breaks the linenothreshold setting in the highlight directive (See issue #721).
This code can be removed when we drop support for Python 2
Apply highlight_language to all literal_block nodes.
This refers both :confval:`highlight_language` setting and
:rst:dir:`highlightlang` directive.
After processing, this overridden transform DOES NOT REMOVE ``highlightlang`` node from doctree in order to allow
pdfbuilder's visit_highlightlang to work as before.
"""
default_priority = 400
def apply(self):
from sphinx.transforms.post_transforms.code import HighlightLanguageVisitor
visitor = HighlightLanguageVisitor(
self.document, self.config.highlight_language
)
self.document.walkabout(visitor)
# This is copied from sphinx.highlighting
def lang_for_block(source, lang):
if lang in ('py', 'python'):
if source.startswith('>>>'):
# interactive session
return 'pycon'
else:
# maybe Python -- try parsing it
if try_parse(source):
return 'python'
else: # Guess
return lang_for_block(source, 'guess')
elif lang in ('python3', 'py3') and source.startswith('>>>'):
# for py3, recognize interactive sessions, but do not try parsing...
return 'pycon3'
elif lang == 'guess':
try:
# return 'python'
lexer = guess_lexer(source)
return lexer.aliases[0]
except Exception:
return None
else:
return lang
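# --- Usage sketch (illustrative) ---
#
#     lang_for_block(">>> print('hi')", "py")   # -> 'pycon' (interactive session)
#     lang_for_block("SELECT 1;", "guess")      # -> whatever lexer Pygments guesses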
def try_parse(src):
# Make sure it ends in a newline
src += '\n'
# Replace "..." by a mark which is also a valid python expression
# (Note, the highlighter gets the original source, this is only done
# to allow "..." in code and still highlight it as Python code.)
mark = "__highlighting__ellipsis__"
src = src.replace("...", mark)
# lines beginning with "..." are probably placeholders for suite
src = re.sub(r"(?m)^(\s*)" + mark + "(.)", r"\1" + mark + r"# \2", src)
if not isinstance(src, bytes):
# Non-ASCII chars will only occur in string literals
# and comments. If we wanted to give them to the parser
# correctly, we'd have to find out the correct source
# encoding. Since it may not even be given in a snippet,
# just replace all non-ASCII characters.
src = src.encode('ascii', 'replace')
try:
    # Actually attempt to compile the snippet; a SyntaxError (or a ValueError,
    # e.g. for null bytes) means the source is probably not Python.
    compile(src, '<string>', 'exec')
except (SyntaxError, ValueError):
    return False
return True
def setup(app):
app.add_builder(PDFBuilder)
# PDF options
app.add_config_value('pdf_documents', [], None)
app.add_config_value('pdf_stylesheets', ['sphinx'], None)
app.add_config_value('pdf_style_path', None, None)
app.add_config_value('pdf_compressed', False, None)
app.add_config_value('pdf_font_path', [], None)
app.add_config_value('pdf_language', 'en_US', None)
app.add_config_value('pdf_fit_mode', '', None),
app.add_config_value('pdf_break_level', 0, None)
app.add_config_value('pdf_inline_footnotes', True, None)
app.add_config_value('pdf_verbosity', 0, None)
app.add_config_value('pdf_use_index', True, None)
app.add_config_value('pdf_domain_indices', True, None)
app.add_config_value('pdf_use_modindex', True, None)
app.add_config_value('pdf_use_coverpage', True, None)
app.add_config_value('pdf_cover_template', 'sphinxcover.tmpl', None)
app.add_config_value('pdf_appendices', [], None)
app.add_config_value('pdf_splittables', True, None)
app.add_config_value('pdf_repeat_table_rows', False, None)
app.add_config_value('pdf_breakside', 'odd', None)
app.add_config_value('pdf_default_dpi', 300, None)
app.add_config_value('pdf_extensions', [], None)
app.add_config_value('pdf_page_template', 'decoratedPage', None)
app.add_config_value('pdf_invariant', False, None)
app.add_config_value('pdf_real_footnotes', False, None)
app.add_config_value('pdf_use_toc', True, None)
app.add_config_value('pdf_toc_depth', 9999, None)
app.add_config_value('pdf_use_numbered_links', False, None)
app.add_config_value('pdf_fit_background_mode', "scale", None)
app.add_config_value('section_header_depth', 2, None)
app.add_config_value(
'pdf_baseurl', urlunparse(['file', os.getcwd() + os.sep, '', '', '', '']), None
)
project_doc = app.config.project + ' Documentation'
app.config.pdf_documents.append(
(
app.config.master_doc,
app.config.project,
project_doc,
app.config.copyright,
'manual',
)
)
return {
'version': rst2pdf.version,
'parallel_read_safe': True,
'parallel_write_safe': False,
}
``` |
{
"source": "2bndy5/sphinx-design",
"score": 2
} |
#### File: sphinx-design/sphinx_design/icons.py
```python
import json
import re
from functools import lru_cache
from typing import Any, Dict, List, Optional, Sequence, Tuple
try:
import importlib.resources as resources
except ImportError:
# python < 3.7
import importlib_resources as resources # type: ignore[no-redef]
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.application import Sphinx
from sphinx.util.docutils import SphinxDirective, SphinxRole
from . import compiled
OCTICON_VERSION = "0.0.0-dd899ea"
OCTICON_CSS = """\
.octicon {
display: inline-block;
vertical-align: text-top;
fill: currentColor;
}"""
def setup_icons(app: Sphinx) -> None:
app.add_role("octicon", OcticonRole())
app.add_directive("_all-octicon", AllOcticons)
for style in ["fa", "fas", "fab", "far"]:
# note: fa is deprecated in v5, fas is the default and fab is the other free option
app.add_role(style, FontawesomeRole(style))
app.add_config_value("sd_fontawesome_latex", False, "env")
app.connect("config-inited", add_fontawesome_pkg)
app.add_node(
fontawesome,
html=(visit_fontawesome_html, depart_fontawesome_html),
latex=(visit_fontawesome_latex, None),
text=(None, None),
man=(None, None),
texinfo=(None, None),
)
@lru_cache(1)
def get_octicon_data() -> Dict[str, Any]:
"""Load all octicon data."""
content = resources.read_text(compiled, "octicons.json")
return json.loads(content)
def list_octicons() -> List[str]:
"""List available octicon names."""
return list(get_octicon_data().keys())
HEIGHT_REGEX = re.compile(r"^(?P<value>\d+(\.\d+)?)(?P<unit>px|em|rem)$")
def get_octicon(
name: str,
height: str = "1em",
classes: Sequence[str] = (),
aria_label: Optional[str] = None,
) -> str:
"""Return the HTML for an GitHub octicon SVG icon.
:height: the height of the octicon, with suffix unit 'px', 'em' or 'rem'.
"""
try:
data = get_octicon_data()[name]
except KeyError:
raise KeyError(f"Unrecognised octicon: {name}")
match = HEIGHT_REGEX.match(height)
if not match:
raise ValueError(
f"Invalid height: '{height}', must be format <integer><px|em|rem>"
)
height_value = round(float(match.group("value")), 3)
height_unit = match.group("unit")
original_height = 16
if "16" not in data["heights"]:
original_height = int(list(data["heights"].keys())[0])
elif "24" in data["heights"]:
if height_unit == "px":
if height_value >= 24:
original_height = 24
elif height_value >= 1.5:
original_height = 24
original_width = data["heights"][str(original_height)]["width"]
width_value = round(original_width * height_value / original_height, 3)
content = data["heights"][str(original_height)]["path"]
options = {
"version": "1.1",
"width": f"{width_value}{height_unit}",
"height": f"{height_value}{height_unit}",
"class": " ".join(("sd-octicon", f"sd-octicon-{name}", *classes)),
}
options["viewBox"] = f"0 0 {original_width} {original_height}"
if aria_label is not None:
options["aria-label"] = aria_label
options["role"] = "img"
else:
options["aria-hidden"] = "true"
opt_string = " ".join(f'{k}="{v}"' for k, v in options.items())
return f"<svg {opt_string}>{content}</svg>"
class OcticonRole(SphinxRole):
"""Role to display a GitHub octicon SVG.
Additional classes can be added to the element after a semicolon.
"""
def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]:
"""Run the role."""
values = self.text.split(";") if ";" in self.text else [self.text]
icon = values[0]
height = "1em" if len(values) < 2 else values[1]
classes = "" if len(values) < 3 else values[2]
icon = icon.strip()
try:
svg = get_octicon(icon, height=height, classes=classes.split())
except Exception as exc:
msg = self.inliner.reporter.error(
f"Invalid octicon content: {exc}",
line=self.lineno,
)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
node = nodes.raw("", nodes.Text(svg), format="html")
self.set_source_info(node)
return [node], []
class AllOcticons(SphinxDirective):
"""Directive to generate all octicon icons.
Primarily for self documentation.
"""
option_spec = {
"class": directives.class_option,
}
def run(self) -> List[nodes.Node]:
"""Run the directive."""
classes = self.options.get("class", [])
list_node = nodes.bullet_list()
for icon in list_octicons():
item_node = nodes.list_item()
item_node.extend(
(
nodes.literal(icon, icon),
nodes.Text(": "),
nodes.raw(
"",
nodes.Text(get_octicon(icon, classes=classes)),
format="html",
),
)
)
list_node += item_node
return [list_node]
class fontawesome(nodes.Element, nodes.General):
"""Node for rendering fontawesome icon."""
class FontawesomeRole(SphinxRole):
"""Role to display a Fontawesome icon.
Additional classes can be added to the element after a semicolon.
"""
def __init__(self, style: str) -> None:
super().__init__()
self.style = style
def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]:
"""Run the role."""
icon, classes = self.text.split(";", 1) if ";" in self.text else [self.text, ""]
icon = icon.strip()
node = fontawesome(
icon_name=icon, classes=[self.style, f"fa-{icon}"] + classes.split()
)
self.set_source_info(node)
return [node], []
def visit_fontawesome_html(self, node):
self.body.append(self.starttag(node, "span", ""))
def depart_fontawesome_html(self, node):
self.body.append("</span>")
def add_fontawesome_pkg(app, config):
if app.config.sd_fontawesome_latex:
app.add_latex_package("fontawesome")
def visit_fontawesome_latex(self, node):
if self.config.sd_fontawesome_latex:
self.body.append(f"\\faicon{{{node['icon_name']}}}")
raise nodes.SkipNode
``` |
{
"source": "2bndy5/sphinx-immaterial",
"score": 2
} |
#### File: 2bndy5/sphinx-immaterial/merge_from_mkdocs_material.py
```python
import argparse
import contextlib
import json
import os
import pathlib
import shutil
import subprocess
import tempfile
MKDOCS_EXCLUDE_PATTERNS = [
# mkdocs-specific configuration files
".gitignore",
".gitattributes",
".github",
".browserslistrc",
".dockerignore",
"requirements.txt",
"setup.py",
"Dockerfile",
"MANIFEST.in",
# Generated files
"material",
# mkdocs-specific files
"src/*.py",
"src/mkdocs_theme.yml",
"src/404.html",
"mkdocs.yml",
# Unneeded files
"typings/lunr",
"src/assets/javascripts/browser/worker",
"src/assets/javascripts/integrations/search/worker",
# Files specific to mkdocs' own documentation
"src/overrides",
"src/assets/images/favicon.png",
"src/.icons/logo.*",
"docs",
"LICENSE",
"CHANGELOG",
"package-lock.json",
"*.md",
]
ap = argparse.ArgumentParser()
ap.add_argument(
"--clone-dir",
default="/tmp/mkdocs-material",
help="Temporary directory used for pristine checkout of mkdocs-material. "
"This remains as a cache after this script completes even if "
"`--keep-temp` is not specified.",
)
ap.add_argument(
"--patch-output",
default="/tmp/mkdocs-material-patch",
help="Path where patch is written.",
)
ap.add_argument("--source-ref", default="origin/master")
ap.add_argument("--keep-temp", action="store_true", help="Keep temporary workdir")
ap.add_argument(
"--dry-run", action="store_true", help="Just print the patch but do not apply."
)
args = ap.parse_args()
source_ref = args.source_ref
script_dir = os.path.dirname(__file__)
merge_base_path = os.path.join(script_dir, "MKDOCS_MATERIAL_MERGE_BASE")
merge_base = pathlib.Path(merge_base_path).read_text(encoding="utf-8").strip()
clone_dir = args.clone_dir
if not os.path.exists(clone_dir):
subprocess.run(
["git", "clone", "https://github.com/squidfunk/mkdocs-material", clone_dir],
check=True,
)
else:
subprocess.run(
["git", "fetch", "origin"],
cwd=clone_dir,
check=True,
)
def _fix_package_json(path: pathlib.Path) -> None:
content = json.loads(path.read_text(encoding="utf-8"))
content.pop("version", None)
content["dependencies"].pop("lunr")
content["dependencies"].pop("fuzzaldrin-plus")
content["dependencies"].pop("lunr-languages")
content["devDependencies"].pop("@types/lunr")
content["devDependencies"].pop("@types/fuzzaldrin-plus")
path.write_text(json.dumps(content, indent=2) + "\n", encoding="utf-8")
def _resolve_ref(ref: str) -> str:
return subprocess.run(
["git", "rev-parse", ref],
encoding="utf-8",
cwd=clone_dir,
check=True,
stdout=subprocess.PIPE,
).stdout.strip()
@contextlib.contextmanager
def _temp_worktree_path():
if args.keep_temp:
temp_workdir = tempfile.mkdtemp()
yield temp_workdir
return
with tempfile.TemporaryDirectory() as temp_workdir:
try:
yield temp_workdir
finally:
subprocess.run(
["git", "worktree", "remove", "--force", temp_workdir],
cwd=clone_dir,
check=True,
)
def _create_adjusted_tree(ref: str, temp_workdir: str) -> str:
print(f"Checking out {source_ref} -> {temp_workdir}")
subprocess.run(
["git", "worktree", "add", "--detach", temp_workdir, ref],
cwd=clone_dir,
check=True,
)
subprocess.run(
["git", "rm", "--quiet", "-r"] + MKDOCS_EXCLUDE_PATTERNS,
cwd=temp_workdir,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_fix_package_json(pathlib.Path(temp_workdir) / "package.json")
try:
subprocess.run(
["git", "commit", "--no-verify", "-a", "-m", "Exclude files"],
cwd=temp_workdir,
capture_output=True,
check=True,
)
except subprocess.CalledProcessError as exc:
# `git commit` fails if user info not in `git config` -> provide verbosity
raise RuntimeError(str(exc.stderr, encoding="utf-8")) from exc
return subprocess.run(
["git", "rev-parse", "HEAD"],
cwd=temp_workdir,
check=True,
encoding="utf-8",
stdout=subprocess.PIPE,
).stdout.strip()
def _get_git_status(workdir: str):
status_output = subprocess.run(
["git", "status", "--porcelain=v1", "-z", "--no-renames"],
stdout=subprocess.PIPE,
check=True,
text=True,
cwd=workdir,
).stdout
result = {}
for line in status_output.split("\x00"):
if not line:
continue
status_code = line[:2]
filename = line[3:]
result[filename] = status_code
return result
def _characterize_git_status(file_status):
conflicts = []
updated = []
for filename, status in file_status.items():
if "U" in status:
conflicts.append(filename)
continue
if status != " ":
updated.append(filename)
return updated, conflicts
def main():
resolved_source_ref = _resolve_ref(args.source_ref)
print(f"SHA for source_ref {args.source_ref} is {resolved_source_ref}")
print("\nGetting mkdocs-material repo ready")
with _temp_worktree_path() as temp_workdir:
new_tree_commit = _create_adjusted_tree(resolved_source_ref, temp_workdir)
patch_path = os.path.abspath(args.patch_output)
if not os.path.exists(patch_path):
os.makedirs(patch_path)
patch_path += os.sep + "patch_info.diff"
print("\nGetting sphinx-immaterial repo ready")
with _temp_worktree_path() as temp_workdir:
print(" creating a temp workspace")
old_tree_commit = _create_adjusted_tree(merge_base, temp_workdir)
subprocess.run(
["git", "rm", "-r", "."],
cwd=temp_workdir,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print(" copying files to the temp workspace.")
shutil.copytree(
script_dir,
temp_workdir,
dirs_exist_ok=True,
ignore=shutil.ignore_patterns(
".git",
"node_modules",
".icons",
"_build",
),
)
print(" creating a local-only commit")
subprocess.run(
["git", "add", "-A", "--force", "."],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=temp_workdir,
check=True,
)
subprocess.run(
["git", "commit", "--no-verify", "-a", "-m", "Working changes"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=temp_workdir,
check=True,
)
print("\nCreating a diff report")
with tempfile.NamedTemporaryFile(mode="wb") as patch_f:
subprocess.run(
["git", "diff", f"{old_tree_commit}..{new_tree_commit}"],
cwd=clone_dir,
stdout=patch_f,
check=True,
)
patch_f.flush()
try:
print("\nCreating a patch report")
subprocess.run(
["git", "apply", "--3way", patch_f.name],
check=True,
cwd=temp_workdir,
capture_output=True,
)
except subprocess.CalledProcessError as exc:
# provide a verbose coherent output from `git apply` when problematic.
output = str(exc.stdout, encoding="utf-8").replace("\n", "\n ")
output += str(exc.stderr, encoding="utf-8").replace("\n", "\n ")
print(f"`{' '.join(exc.cmd)}` returned {exc.returncode}\n {output}")
with open(patch_path, "wb") as patch_f:
subprocess.run(
["git", "diff", "HEAD"], check=True, cwd=temp_workdir, stdout=patch_f
)
file_status = _get_git_status(temp_workdir)
updated_files, conflict_files = _characterize_git_status(file_status)
print("Patch in: " + patch_path)
if not args.dry_run:
print("\nApplying patch file to sphinx-immaterial repo.")
# LINUX ONLY - the `patch` cmd doesn't have a native equivalent on Windows.
with open(patch_path, "rb") as std_in_file:
subprocess.run(
["patch", "-p1"], stdin=std_in_file, check=True, cwd=script_dir
)
print("\nStaging non-conflicting files.")
subprocess.run(["git", "add", "--"] + updated_files, check=True, cwd=script_dir)
pathlib.Path(merge_base_path).write_text(
resolved_source_ref + "\n", encoding="utf-8"
)
else:
print(pathlib.Path(patch_path).read_text(encoding="utf-8"))
if conflict_files:
print("File with conflicts:")
for filename in conflict_files:
print(f"{file_status[filename]} {filename}")
if __name__ == "__main__":
main()
```
#### File: sphinx-immaterial/sphinx_immaterial/details_patch.py
```python
from typing import List
from docutils import nodes
try:
from sphinxcontrib.details.directive import DetailsDirective
except ImportError:
DetailsDirective = None
def monkey_patch_details_run():
"""Patch the details directive to respect the class option.
This solution is a temporary fix pending response from
https://github.com/tk0miya/sphinxcontrib-details-directive/issues/4
"""
if DetailsDirective is None:
return
def run(self) -> List[nodes.container]:
admonition = nodes.container(
"",
classes=self.options.get("class", []) + self.options.get("classes", []),
opened="open" in self.options,
type="details",
)
textnodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
admonition += nodes.paragraph(self.arguments[0], "", *textnodes)
admonition += messages
self.state.nested_parse(self.content, self.content_offset, admonition)
self.add_name(admonition)
return [admonition]
DetailsDirective.run = run
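# --- Usage sketch (assumed wiring, not part of the original module) ---
# The patch would typically be applied from an extension's ``setup()``:
#
#     def setup(app):
#         monkey_patch_details_run()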
```
#### File: sphinx-immaterial/sphinx_immaterial/md_admonition.py
```python
from docutils import nodes
from docutils.parsers.rst.roles import set_classes
from docutils.parsers.rst.directives import admonitions
from sphinx.application import Sphinx
__version__ = "0.0.1"
class NoTitleAdmonition(admonitions.BaseAdmonition):
optional_arguments = 1
node_class = nodes.admonition
def run(self):
set_classes(self.options)
self.assert_has_content()
text = "\n".join(self.content)
admonition_node = self.node_class(text, **self.options)
self.add_name(admonition_node)
if self.node_class is nodes.admonition:
title_text = self.arguments[0] if self.arguments else ""
textnodes, messages = self.state.inline_text(title_text, self.lineno)
title = nodes.title(title_text, "", *textnodes)
title.source, title.line = self.state_machine.get_source_and_line(
self.lineno
)
if title_text:
admonition_node += title
admonition_node += messages
if not "classes" in self.options and title_text:
admonition_node["classes"] += ["admonition" + nodes.make_id(title_text)]
self.state.nested_parse(self.content, self.content_offset, admonition_node)
return [admonition_node]
def setup(app: Sphinx):
"""register our custom directive."""
app.add_directive("md-admonition", NoTitleAdmonition)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
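# --- Usage sketch (illustrative) ---
# Once registered, the directive can be used in reStructuredText roughly as::
#
#     .. md-admonition:: Optional Title
#
#        Body text; when no title argument is given, the admonition is
#        rendered without a title node.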
``` |
{
"source": "2bndy5/webapp-standalone",
"score": 3
} |
#### File: webapp/inputs/camera_manager.py
```python
try:
import cStringIO as io
except ImportError:
import io
from .check_platform import ON_RASPI
from ..utils.super_logger import logger
CAMERA_AVAILABLE = False
# Import the proper libraries depending on platform
if ON_RASPI:
try:
import picamera
CAMERA_AVAILABLE = True
except ImportError:
logger.warning('Camera', 'The "picamera" module is not installed')
else: # running on a PC
try:
import cv2
CAMERA_AVAILABLE = True
except ImportError:
logger.warning('Camera', 'The "opencv-python" is not installed')
CAMERA_AVAILABLE = False
class CameraManager:
""" This class is for abstracting the camera feed capabilities. """
def __init__(self):
self.camera = None
@property
def initialized(self):
""" Returns true if the camera is ready to be used """
return CAMERA_AVAILABLE and self.camera is not None
def _init_cv2_camera(self):
""" Initialize the camera feed using OpenCV's implementation """
camera = cv2.VideoCapture(0)
return camera
def _init_pi_camera(self):
""" Initialize the camera feed using PiCamera's implementation """
camera = picamera.PiCamera()
camera.resolution = (256, 144)
camera.start_preview(fullscreen=False, window=(100, 20, 650, 480))
# time.sleep(1)
# camera.stop_preview()
return camera
def open_camera(self):
""" Opens and initializes the camera """
if not CAMERA_AVAILABLE:
raise RuntimeError('The camera is not available for use!')
if ON_RASPI:
try:
self.camera = self._init_pi_camera()
except picamera.exc.PiCameraError as picam_error:
self.camera = None
logger.error('Camera', 'The picamera is not connected!')
logger.error('Camera', picam_error)
else: # running on a PC
try:
self.camera = self._init_cv2_camera()
except cv2.error as cv2_error:
self.camera = None
logger.error('Camera', 'An openCV error occurred!')
logger.error('Camera', cv2_error)
def capture_image(self):
""" Fetches an image from the camera feed and incodes it as a JPEG buffer """
if self.initialized:
if ON_RASPI:
sio = io.BytesIO()
self.camera.capture(sio, "jpeg", use_video_port=True)
buffer = sio.getvalue()
else:
_, frame = self.camera.read()
_, buffer = cv2.imencode('.jpg', frame)
return buffer
else:
raise RuntimeError('Camera manager is not initialized!')
def close_camera(self):
"""
Cleans up and closes the camera. Note that you cannot use the camera unless you
re-initialize it with `open_camera()`
"""
if self.initialized:
if ON_RASPI: # Using PiCamera
self.camera.close()
else: # Using OpenCV
self.camera.release()
self.camera = None
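# --- Usage sketch (illustrative) ---
#
#     manager = CameraManager()
#     manager.open_camera()
#     if manager.initialized:
#         jpeg_buffer = manager.capture_image()
#     manager.close_camera()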
```
#### File: webapp-standalone/webapp/routes.py
```python
import os
from flask import Blueprint, render_template, request, flash, redirect
from flask_login import login_required, login_user, logout_user, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from .sockets import socketio
from .users import User, DB
blueprint = Blueprint('blueprint', __name__)
@blueprint.route('/')
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
""" Renders the register page """
if request.method == 'GET':
return render_template('login.html')
username = request.form['username']
password = request.form['password']
user = User(username, generate_password_hash(password))
if User.query.filter_by(username=username).count() > 0:
flash("Account already exists", 'error')
else:
DB.session.add(user)
DB.session.commit()
flash('User successfully registered', 'success')
return redirect('/login')
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
"""
If it's a POST request, it will attempt to log the user in.
Otherwise, it renders the login page.
"""
if request.method == 'GET':
return render_template('login.html')
username = request.form['username']
password = request.form['password']
registered_user = User.query.filter_by(username=username).first()
if registered_user and check_password_hash(registered_user.password, password):
login_user(registered_user)
flash('Logged in successfully', 'success')
else:
flash('Username or Password is invalid', 'error')
return redirect('/login')
return redirect('home')
@blueprint.route('/logout')
@login_required
def logout():
""" Redirects to login page after logging out """
logout_user()
return redirect('login')
@blueprint.route('/')
@blueprint.route('/home')
@login_required
def home():
""" Renders the home page """
return render_template('home.html', title='Home')
@blueprint.route('/remote')
@login_required
def remote():
""" Renders the remote page """
return render_template('remote.html', title='Remote Control')
@blueprint.route('/sensors')
@login_required
def sensors():
""" Renders the sensor dashboard page """
return render_template('sensors.html', title='Sensor Dashboard')
@blueprint.route('/automode')
@login_required
def automode():
""" Renders the autonomous page """
return render_template('automode.html', title='Autonomous Navigation')
@blueprint.route('/terminal')
@login_required
def terminal():
""" Renders the virtual terminal page """
return render_template('terminal.html', title='Terminal I/O')
@blueprint.route('/settings')
@login_required
def settings_page():
""" Renders the settings page """
return render_template('settings.html', title='Settings')
@blueprint.route('/about')
def about():
""" Renders the about page """
return render_template('about.html', title='About this project')
@blueprint.route("/shutdown_server")
@login_required
def shutdown_server():
""" Shutdowns the webapp. """
socketio.stop()
@blueprint.route("/restart")
@login_required
def restart():
""" Restarts the robot (Only applicable if webserver runs off rasp pi) """
os.system('sudo reboot')
@blueprint.route("/shutdown_robot")
@login_required
def shutdown_robot():
""" Shutsdown the robot (Only applicable if webserver runs off rasp pi) """
os.system('sudo shutdown -h now')
@blueprint.route("/delete_user")
@login_required
def delete_user():
""" Deletes the current user's account. """
DB.session.delete(current_user)
DB.session.commit()
flash("Account deleted", 'success')
return redirect('/login')
@blueprint.route("/reset_password", methods=['GET', 'POST'])
@login_required
def reset_password():
""" Resets the current user's password. """
if request.method == 'GET':
return render_template('home.html')
old_password = request.form['old-password']
new_password = request.form['new-password']
user = current_user
if check_password_hash(user.password, old_password):
        user.password = generate_password_hash(new_password)
DB.session.add(user)
DB.session.commit()
flash("Password has been updated", 'success')
else:
flash("Incorrect old password", 'error')
    return redirect('/home')
```
#### File: webapp/utils/file_encryption.py
```python
from cryptography.fernet import Fernet
class FernetVault:
""" A file vault that decrypts the contents of an encrypted file given a key file. """
def __init__(self, key_file_path):
""" Initialize the vault with a master key file. """
with open(key_file_path, 'rb') as fp:
self.key = fp.read()
def read_file(self, input_file):
""" Read an encrypted file. """
with open(input_file, 'rb') as fp:
data = fp.read()
fernet = Fernet(self.key)
decrypted = fernet.decrypt(data)
return decrypted
def write_file(self, data, output_file):
""" Write an encrypted file. """
fernet = Fernet(self.key)
encrypted = fernet.encrypt(data)
with open(output_file, 'wb') as fp:
fp.write(encrypted)
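# --- Hypothetical usage sketch (not part of the original file) ---
# Assumes a Fernet key has already been generated and saved, e.g.:
#   from cryptography.fernet import Fernet
#   with open('master.key', 'wb') as fp:
#       fp.write(Fernet.generate_key())
# vault = FernetVault('master.key')
# vault.write_file(b'secret payload', 'payload.enc')
# assert vault.read_file('payload.enc') == b'secret payload'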
``` |
{
"source": "2bobo/ahiruyaki_counter",
"score": 2
} |
#### File: 2bobo/ahiruyaki_counter/ahiruyaki_counter.py
```python
import os
import sys
import json
import re
import urllib2
import datetime
import time
import ConfigParser
import socket
import struct
import string
import tweepy
class ZabbixSender:
zbx_header = 'ZBXD'
zbx_version = 1
zbx_sender_data = {u'request': u'sender data', u'data': []}
send_data = ''
def __init__(self, server_host, server_port = 10051):
self.server_ip = socket.gethostbyname(server_host)
self.server_port = server_port
def AddData(self, host, key, value, clock = None):
add_data = {u'host': host, u'key': key, u'value': value}
if clock != None:
add_data[u'clock'] = clock
self.zbx_sender_data['data'].append(add_data)
return self.zbx_sender_data
def ClearData(self):
self.zbx_sender_data['data'] = []
return self.zbx_sender_data
def __MakeSendData(self):
zbx_sender_json = json.dumps(self.zbx_sender_data, separators=(',', ':'), ensure_ascii=False).encode('utf-8')
json_byte = len(zbx_sender_json)
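        # Zabbix "trapper" protocol header: 4-byte "ZBXD" magic, 1-byte protocol version,
        # then an 8-byte little-endian payload length, followed by the JSON payload.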
self.send_data = struct.pack("<4sBq" + str(json_byte) + "s", self.zbx_header, self.zbx_version, json_byte, zbx_sender_json)
def Send(self):
self.__MakeSendData()
so = socket.socket()
so.connect((self.server_ip, self.server_port))
wobj = so.makefile(u'wb')
wobj.write(self.send_data)
wobj.close()
robj = so.makefile(u'rb')
recv_data = robj.read()
robj.close()
so.close()
tmp_data = struct.unpack("<4sBq" + str(len(recv_data) - struct.calcsize("<4sBq")) + "s", recv_data)
recv_json = json.loads(tmp_data[3])
return recv_data
class ZabbixAPI(object):
    # ZABBIX server API URL
    zbx_url = ""
    # User ID used for the API
    zbx_userid = ""
    # Password
    zbx_passwd = ""
    # Authentication key
    zbx_auth = ""
    # HTTP headers
    headers = {"Content-Type":"application/json-rpc"}
    # Graph size, width: 800
    zbx_gwidth = "800"
    # Graph size, height: 300
    zbx_gheight = "300"
    # Graph border, default: none
    zbx_gborder = "0"
    # Function that issues the auth key
    # Returns: ZABBIX API auth key
def auth(self):
auth_post = json.dumps({
'jsonrpc': '2.0',
'method': 'user.login',
'params': {
'user': self.zbx_userid,
                'password': self.zbx_passwd},
'auth':None,
'id': 1})
#opener = urllib2.build_opener(urllib2.HTTPSHandler())
#urllib2.install_opener(opener)
req = urllib2.Request(self.zbx_url, auth_post, self.headers)
f = urllib2.urlopen(req)
str_value = f.read()
f.close()
value = json.loads(str_value)
try:
self.zbx_auth = value["result"]
return value["result"]
except:
print "Authentication failure"
return 0
def send(self, json_data):
req = urllib2.Request(self.zbx_url, json_data, self.headers)
f = urllib2.urlopen(req)
str_value = f.read()
f.close()
dict_value = json.loads(str_value)
return dict_value
    # Login function for obtaining a cookie
    # Returns: authentication token to store in the cookie
def login(self, user, passwd):
json_login = json.dumps({
"jsonrpc":"2.0",
"method":"user.login",
"params":{
"user":user,
"password":<PASSWORD>},
"id":1})
sessionid = self.send(json_login)
cookie = sessionid["result"]
cookie = 'zbx_sessionid=' + cookie
return cookie
    # Fetch a rendered graph image
def get_graph(self, cookie, graphid, period, stime):
opener = urllib2.build_opener()
opener.addheaders.append(("cookie",cookie))
graph_url = self.zbx_url.replace("api_jsonrpc", "chart2")
graphi_get_url = "%s?graphid=%s&width=%s&height=%s&border=%s&period=%s&stime=%s" % (
graph_url,
graphid,
self.zbx_gwidth,
self.zbx_gheight,
self.zbx_gborder,
period,
stime)
graph = opener.open(graphi_get_url)
return graph
def run_zbxapi(reqjson):
returndata = zbx_api.send(reqjson)
result = returndata["result"]
if len(result) == 1:
return result
else:
print "error", reqjson, result
exit()
def authorize(conf):
""" Authorize using OAuth.
"""
auth = tweepy.OAuthHandler(conf.get("twitter","consumer_key"), conf.get("twitter","consumer_secret"))
auth.set_access_token(conf.get("twitter","access_key"), conf.get("twitter","access_secret"))
return auth
def create_zbx_item(tweetid, zbx_api, zbx_auth_key, base_item_key):
item_key = base_item_key + tweetid
reqdata = json.dumps({
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"hostids": "10107",
"search": {
"key_": item_key}
},
"auth":zbx_auth_key,
"id": 1})
zbx_item_check_result = zbx_api.send(reqdata)
if len(zbx_item_check_result["result"]) == 0:
if base_item_key.find("hcount") > -1:
attweetid = u"[毎時]@" + tweetid
applications_id = ["461"]
else:
attweetid = u"[日次]@" + tweetid
applications_id = ["462"]
reqdata = json.dumps({
"jsonrpc": "2.0",
"method": "item.create",
"params": {
"name": attweetid,
"key_": item_key,
"hostid": "10107",
"type": 2,
"value_type": 3,
"applications":
applications_id
,
},
"auth":zbx_auth_key,
"id": 1})
zbx_item_create_result = zbx_api.send(reqdata)
return zbx_item_create_result
else:
return zbx_item_check_result
def put_zbx_sender(zbxsvip, zbx_key, hostip, sendvalue):
sender = ZabbixSender(zbxsvip)
sender.AddData(hostip, zbx_key, sendvalue)
try:
sender.Send()
except:
print "[ERROR] host: %s value: %s"%(hostip,sendvalue)
sender.ClearData()
def get_zbx_ahiruyaki_item(zbx_api, zbx_auth_key, item_key):
reqdata = json.dumps({
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"hostids": "10107",
"search": {
"key_": item_key}
},
"auth":zbx_auth_key,
"id": 1})
return zbx_api.send(reqdata)
if __name__ == '__main__':
base = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.normpath(os.path.join(base, 'config.ini'))
conf = ConfigParser.SafeConfigParser()
conf.read(config_file_path)
# zabbix api login
zbx_api = ZabbixAPI()
zbx_api.zbx_url = conf.get("zabbix","url")
zbx_api.zbx_userid = conf.get("zabbix","userid")
zbx_api.zbx_passwd = conf.get("zabbix","passwd")
# get zabbxi api cookie
zbx_auth_key = zbx_api.auth()
argvs = sys.argv
    print argvs[1:]
if len(argvs) == 2 and argvs[1] == "day":
base_item_key = "ahiruyaki.dcount."
oneoldtime = datetime.datetime.utcnow() - datetime.timedelta(days = 1)
start_time = datetime.datetime(
int(oneoldtime.strftime("%Y")),
int(oneoldtime.strftime("%m")),
int(oneoldtime.strftime("%d")),
0,0,0,0)
end_time = datetime.datetime(
int(oneoldtime.strftime("%Y")),
int(oneoldtime.strftime("%m")),
int(oneoldtime.strftime("%d")),
23,59,59,999999)
elif len(argvs) == 2 and argvs[1] == "hour":
base_item_key = "ahiruyaki.hcount."
oneoldtime = datetime.datetime.utcnow() - datetime.timedelta(hours = 1)
start_time = datetime.datetime(
int(oneoldtime.strftime("%Y")),
int(oneoldtime.strftime("%m")),
int(oneoldtime.strftime("%d")),
int(oneoldtime.strftime("%H")),
0,0,0)
end_time = datetime.datetime(
int(oneoldtime.strftime("%Y")),
int(oneoldtime.strftime("%m")),
int(oneoldtime.strftime("%d")),
int(oneoldtime.strftime("%H")),
59,59,999999)
    else:
        print len(argvs)
        print "Error: expected a single argument, either 'day' or 'hour'"
        sys.exit(1)
twdate = start_time + datetime.timedelta(hours = 9)
# print start_time + datetime.timedelta(hours = 9)
# print end_time + datetime.timedelta(hours = 9)
use_zbx_item = get_zbx_ahiruyaki_item(zbx_api, zbx_auth_key, base_item_key)
yakishi_list = {}
for item in use_zbx_item["result"]:
twname = item["key_"].replace(base_item_key, "")
yakishi_list[twname] = 0
postdata = unicode(twdate.strftime("%Y年%m月%d日%H時台に焼かれたあひるの数\n(テスト運用中)\n"),'utf-8', 'ignore')
auth = authorize(conf)
api = tweepy.API(auth_handler=auth)
keywords = [u"あひる焼き OR #あひる焼き OR Ahiruyaki OR #Ahiruyaki ", u"-RT"]
query = ' AND '.join(keywords)
new_yaskihi_list = []
for tweet in api.search(q=query, count=1000):
textdata = tweet.text.encode('utf-8')
if textdata.find("あひる焼き") != -1 and textdata.find("あひる焼きカウンター") == -1:
if start_time < tweet.created_at < end_time :
if not tweet.user.screen_name in yakishi_list:
itemdata = create_zbx_item(tweet.user.screen_name, zbx_api, zbx_auth_key, base_item_key)
new_yaskihi_list.append(tweet.user.screen_name)
yakishi_list[tweet.user.screen_name] = 1
else:
yakishi_list[tweet.user.screen_name] += 1
time.sleep(60)
for id in new_yaskihi_list:
item_key = base_item_key + id
put_zbx_sender(conf.get("zabbix","ip"), item_key, "ahiruyaki", 0)
if len(yakishi_list) == 0:
postdata = postdata + u"あひるは焼かれなかった\n"
else:
for id, count in yakishi_list.items():
item_key = base_item_key + id
put_zbx_sender(conf.get("zabbix","ip"), item_key, "ahiruyaki", count)
postdata = postdata + id + ": " + str(count) + u"焼き\n"
#post twitter
# print postdata
# api.update_status(postdata)
``` |
{
"source": "2bobo/zbx_ConoHa",
"score": 2
} |
#### File: 2bobo/zbx_ConoHa/zbx_ConoHa.py
```python
import sys
import json
import requests
import socket
import struct
import time
from datetime import datetime
class ZabbixSender:
zbx_header = 'ZBXD'
zbx_version = 1
zbx_sender_data = {u'request': u'sender data', u'data': []}
send_data = ''
def __init__(self, server_host, server_port = 10051):
self.server_ip = socket.gethostbyname(server_host)
self.server_port = server_port
def AddData(self, host, key, value, clock = None):
add_data = {u'host': host, u'key': key, u'value': value}
if clock != None:
add_data[u'clock'] = clock
self.zbx_sender_data['data'].append(add_data)
return self.zbx_sender_data
def ClearData(self):
self.zbx_sender_data['data'] = []
return self.zbx_sender_data
def __MakeSendData(self):
zbx_sender_json = json.dumps(self.zbx_sender_data, separators=(',', ':'), ensure_ascii=False).encode('utf-8')
json_byte = len(zbx_sender_json)
self.send_data = struct.pack("<4sBq" + str(json_byte) + "s", self.zbx_header, self.zbx_version, json_byte, zbx_sender_json)
def Send(self):
self.__MakeSendData()
so = socket.socket()
so.connect((self.server_ip, self.server_port))
wobj = so.makefile(u'wb')
wobj.write(self.send_data)
wobj.close()
robj = so.makefile(u'rb')
recv_data = robj.read()
robj.close()
so.close()
tmp_data = struct.unpack("<4sBq" + str(len(recv_data) - struct.calcsize("<4sBq")) + "s", recv_data)
recv_json = json.loads(tmp_data[3])
return recv_data
if __name__ == '__main__':
    # --- Settings ---
    # Zabbix server IP
    zbx_sv_ip = "127.0.0.1"
    # ConoHa API user
    api_user = ""
    # ConoHa API password
    api_pass = ""
    # ConoHa tenant ID
    tenant_id = ""
    # --- End of settings ---
# zabbix sender
sender = ZabbixSender(zbx_sv_ip)
# auth
url = "https://identity.tyo1.conoha.io/v2.0/tokens"
data = json.dumps({"auth":{"passwordCredentials":{"username":api_user ,"password":<PASSWORD>},"tenantId":tenant_id}})
auth_header = {"Accept":"application/json"}
response = requests.post(
url,
data=data,
headers=auth_header)
rdata = response.json()
token_id = str(rdata["access"]["token"]["id"])
def get_conoha_api(url, tokenid, data = ""):
header = {"Accept":"application/json", "X-Auth-Token":token_id}
response = requests.get(
url,
headers=header,
data=data)
return response.json()
# get vmlist
vmlist_url = "https://compute.tyo1.conoha.io/v2/" + tenant_id + "/servers/detail"
    rdata = get_conoha_api(vmlist_url, token_id)
now_time = str(int(time.time()))
servers = []
data = []
for server in rdata["servers"]:
# VPS Server ID
serverid = server["id"]
servers.append({"id":server["id"], "nametag":server["metadata"]["instance_name_tag"]})
data.append({"{#HOSTID}":server["id"], "{#HOSTNAME}":server["metadata"]["instance_name_tag"]})
# VPS Status
sender.AddData(serverid, "ConoHa.vm.status", server["OS-EXT-STS:power_state"])
# VPS IP
sender.AddData(serverid, "ConoHa.vm.extip", server["name"].replace("-", "."))
# VPS CPU Performance
vm_cpu_url = "https://compute.tyo1.conoha.io/v2/" + tenant_id + "/servers/" + server["id"] + "/rrd/cpu?start_date_raw=" + now_time + "&end_date_raw=" + now_time + "&mode=average"
        c = get_conoha_api(vm_cpu_url, token_id)
sender.AddData(serverid, "ConoHa.vm.cpupfo", c["cpu"]["data"][0][0])
paiment_url = "https://account.tyo1.conoha.io/v1/" + tenant_id + "/billing-invoices?limit=1&offset=1"
    rdata = get_conoha_api(paiment_url, token_id)
invoice_date = datetime.strptime(rdata["billing_invoices"][0]["invoice_date"], "%Y-%m-%dT%H:%M:%SZ")
# host
send_data = json.dumps({"data":data})
sender.AddData("ConoHa", "ConoHa.Hosts", send_data)
# payment
argvs = sys.argv
if len(argvs) == 2 and argvs[1] == "payment":
#sender.AddData("ConoHa", "ConoHa.billing-invoices", int(rdata["billing_invoices"][0]["bill_plus_tax"]), int(time.mktime(invoice_date.timetuple())))
sender.AddData("ConoHa", "ConoHa.billing-invoices", int(rdata["billing_invoices"][0]["bill_plus_tax"]))
# send
sender.Send()
sender.ClearData()
``` |
{
"source": "2bRich/python-fanotify",
"score": 3
} |
#### File: python-fanotify/doc/protect.py
```python
from __future__ import print_function
import os
import sys
import fanotify
def IsRootProcess(pid):
return os.stat('/proc/{}'.format(pid)).st_uid == 0
def main():
if len(sys.argv) != 2:
print('Usage: {} <path>'.format(sys.argv[0]))
sys.exit(1)
fan_fd = fanotify.Init(fanotify.FAN_CLASS_CONTENT, os.O_RDONLY)
fanotify.Mark(fan_fd, fanotify.FAN_MARK_ADD, fanotify.FAN_OPEN_PERM, -1,
sys.argv[1])
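    # Note: fanotify_init() requires CAP_SYS_ADMIN, so this example must be run as root.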
# Loop continuously rejecting events that don't match root's uid.
while True:
buf = os.read(fan_fd, 4096)
assert buf
while fanotify.EventOk(buf):
buf, event = fanotify.EventNext(buf)
if IsRootProcess(event.pid):
print('Allowing open from root pid {}'.format(event.pid))
response = fanotify.FAN_ALLOW
else:
print('Denying open from pid {}'.format(event.pid))
response = fanotify.FAN_DENY
os.write(fan_fd, fanotify.Response(event.fd, response))
os.close(event.fd)
assert not buf
if __name__ == '__main__':
main()
``` |
{
"source": "2b-t/stereo-matching",
"score": 3
} |
#### File: stereo-matching/src/main.py
```python
import argparse
import matplotlib.pyplot as plt
import numpy as np
from matching_algorithm.matching_algorithm import MatchingAlgorithm
from matching_algorithm.semi_global_matching import SemiGlobalMatching
from matching_algorithm.winner_takes_it_all import WinnerTakesItAll
from matching_cost.matching_cost import MatchingCost
from matching_cost.normalised_cross_correlation import NormalisedCrossCorrelation
from matching_cost.sum_of_absolute_differences import SumOfAbsoluteDifferences
from matching_cost.sum_of_squared_differences import SumOfSquaredDifferences
from stereo_matching import StereoMatching
from utilities import AccX, IO
def main(left_image_path: str, right_image_path: str,
matching_algorithm_name: str, matching_cost_name: str,
max_disparity: int, filter_radius: int,
groundtruth_image_path: str, mask_image_path: str, accx_threshold: int,
output_path: str = None, output_name: str = "unknown", is_plot: bool = True) -> None:
# Imports images for stereo matching, performs stereo matching, plots results and outputs them to a file
# @param[in] left_image_path: Path to the image for the left eye
# @param[in] right_image_path: Path to the image for the right eye
# @param[in] matching_algorithm_name: Name of the matching algorithm
# @param[in] matching_cost_name: Name of the matching cost type
# @param[in] max_disparity: Maximum disparity to consider
# @param[in] filter_radius: Filter radius to be considered for cost volume
# @param[in] groundtruth_image_path: Path to the ground truth image
# @param[in] mask_image_path: Path to the mask for excluding pixels from the AccX accuracy measure
# @param[in] accx_threshold: Mismatch in disparity to accept for AccX accuracy measure
# @param[in] output_path: Location of the output path, if None no output is generated
# @param[in] output_name: Name of the scenario for pre-pending the output file
# @param[in] is_plot: Flag for turning plot of results on and off
# Load input images
left_image = IO.import_image(left_image_path)
right_image = IO.import_image(right_image_path)
# Load ground truth images
groundtruth_image = None
mask_image = None
try:
groundtruth_image = IO.import_image(groundtruth_image_path)
mask_image = IO.import_image(mask_image_path)
except:
pass
# Plot input images
if is_plot is True:
plt.figure(figsize=(8,4))
plt.subplot(1,2,1), plt.imshow(left_image, cmap='gray'), plt.title('Left')
plt.subplot(1,2,2), plt.imshow(right_image, cmap='gray'), plt.title('Right')
plt.tight_layout()
# Set-up algorithm
matching_algorithm = None
if matching_algorithm_name == "SGM":
matching_algorithm = SemiGlobalMatching
elif matching_algorithm_name == "WTA":
matching_algorithm = WinnerTakesItAll
else:
raise ValueError("Matching algorithm '" + matching_algorithm_name + "' not recognised!")
matching_cost = None
if matching_cost_name == "NCC":
matching_cost = NormalisedCrossCorrelation
elif matching_cost_name == "SAD":
matching_cost = SumOfAbsoluteDifferences
elif matching_cost_name == "SSD":
matching_cost = SumOfSquaredDifferences
else:
raise ValueError("Matching cost '" + matching_cost_name + "' not recognised!")
# Perform stereo matching
sm = StereoMatching(left_image, right_image, matching_cost, matching_algorithm, max_disparity, filter_radius)
print("Performing stereo matching...")
sm.compute()
print("Stereo matching completed.")
res_image = sm.result()
# Compute accuracy
try:
accx = AccX.compute(res_image, groundtruth_image, mask_image, accx_threshold)
print("AccX accuracy measure for threshold " + str(accx_threshold) + ": " + str(accx))
except:
accx = None
# Plot result
if is_plot is True:
plt.figure()
plt.imshow(res_image, cmap='gray')
plt.show()
# Output to file
if output_path is not None:
result_file_path = IO.export_image(IO.normalise_image(res_image, groundtruth_image),
output_path, output_name, matching_cost_name, matching_algorithm_name,
max_disparity, filter_radius, accx_threshold)
print("Exported result to file '" + result_file_path + "'.")
return
if __name__== "__main__":
# Parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--left", type=str,
help="Path to left image")
parser.add_argument("-r", "--right", type=str,
help="Path to right image")
parser.add_argument("-a", "--algorithm", type=str, choices=["SGM", "WTA"],
help="Matching cost algorithm", default = "WTA")
parser.add_argument("-c", "--cost", type=str, choices=["NCC", "SAD", "SSD"],
help="Matching cost type", default = "SAD")
parser.add_argument("-D", "--disparity", type=int,
help="Maximum disparity", default = 60)
parser.add_argument("-R", "--radius", type=int,
help="Filter radius", default = 3)
parser.add_argument("-o", "--output", type=str,
help="Output directory, by default no output", default = None)
parser.add_argument("-n", "--name", type=str,
help="Output file name", default = "unknown")
parser.add_argument("-p", "--no-plot", action='store_true',
help="Flag for de-activating plotting")
parser.add_argument("-g", "--groundtruth", type=str,
help="Path to groundtruth image", default = None)
parser.add_argument("-m", "--mask", type=str,
help="Path to mask image for AccX accuracy measure", default = None)
parser.add_argument("-X", "--accx", type=int,
help="AccX accuracy measure threshold", default = 60)
args = parser.parse_args()
main(args.left, args.right, args.algorithm, args.cost, args.disparity, args.radius,
args.groundtruth, args.mask, args.accx,
args.output, args.name, not args.no_plot)
```
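A minimal invocation sketch for the entry point above (not from the repository); the image paths are placeholders and the call assumes `main.py` is importable from the working directory:
```python
# Hypothetical example: WTA matching with a SAD cost on a stereo pair.
from main import main

main(
    left_image_path="data/left.png",      # placeholder path
    right_image_path="data/right.png",    # placeholder path
    matching_algorithm_name="WTA",
    matching_cost_name="SAD",
    max_disparity=60,
    filter_radius=3,
    groundtruth_image_path=None,          # without ground truth the AccX measure is skipped
    mask_image_path=None,
    accx_threshold=60,
    output_path=None,                     # no file output
    is_plot=False,
)
```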
#### File: src/matching_cost/matching_cost.py
```python
import abc
import numpy as np
class MatchingCost(abc.ABC):
# Base class for stereo matching costs for calculating a cost volume
@staticmethod
@abc.abstractmethod
    def compute(left_image: np.ndarray, right_image: np.ndarray, max_disparity: int, filter_radius: int) -> np.ndarray:
# Function for calculating the cost volume
# @param[in] left_image: The left image to be used for stereo matching (H,W)
# @param[in] right_image: The right image to be used for stereo matching (H,W)
# @param[in] max_disparity: The maximum disparity to consider
# @param[in] filter_radius: The filter radius to be considered for matching
        # @return: The cost volume of matching costs (H,W,D)
pass
```
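For illustration only, a minimal cost that satisfies the same static `compute` interface; it computes a per-pixel absolute difference (no window aggregation) and is not the repository's own `SumOfAbsoluteDifferences` implementation:
```python
import numpy as np

class PixelwiseAbsoluteDifference:
    """Illustrative stand-in mirroring the MatchingCost interface above."""
    @staticmethod
    def compute(left_image: np.ndarray, right_image: np.ndarray,
                max_disparity: int, filter_radius: int) -> np.ndarray:
        # filter_radius is accepted but unused here (equivalent to a 1x1 window)
        H, W = left_image.shape
        cost_volume = np.full((H, W, max_disparity), np.inf)
        for d in range(max_disparity):
            # align left pixel (y, x) with right pixel (y, x - d); columns x < d stay at inf
            diff = np.abs(left_image[:, d:].astype(np.float64)
                          - right_image[:, :W - d].astype(np.float64))
            cost_volume[:, d:, d] = diff
        return cost_volume
```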
#### File: src/matching_cost/normalised_cross_correlation.py
```python
from numba import jit
import numpy as np
from .matching_cost import MatchingCost
class NormalisedCrossCorrelation(MatchingCost):
@staticmethod
@jit(nopython = True, parallel = True, cache = True)
def compute(left_image: np.ndarray, right_image: np.ndarray, max_disparity: int, filter_radius: int) -> np.ndarray:
# Compute a cost volume with maximum disparity D considering a neighbourhood R with Normalized Cross Correlation (NCC)
# @param[in] left_image: The left image to be used for stereo matching (H,W)
# @param[in] right_image: The right image to be used for stereo matching (H,W)
# @param[in] max_disparity: The maximum disparity to consider
# @param[in] filter_radius: The filter radius to be considered for matching
        # @return: The cost volume of matching costs (H,W,D); the correlation is negated so lower values mean better matches
(H,W) = left_image.shape
cost_volume = np.zeros((max_disparity,H,W))
# Loop over all possible disparities
for d in range(0, max_disparity):
# Loop over image
for y in range(filter_radius, H - filter_radius):
for x in range(filter_radius, W - filter_radius):
l_mean = 0
r_mean = 0
n = 0
# Loop over window
for v in range(-filter_radius, filter_radius + 1):
for u in range(-filter_radius, filter_radius + 1):
# Calculate cumulative sum
l_mean += left_image[y+v, x+u]
r_mean += right_image[y+v, x+u-d]
n += 1
l_mean = l_mean/n
r_mean = r_mean/n
l_r = 0
l_var = 0
r_var = 0
for v in range(-filter_radius, filter_radius + 1):
for u in range(-filter_radius, filter_radius + 1):
# Calculate terms
l = left_image[y+v, x+u] - l_mean
r = right_image[y+v, x+u-d] - r_mean
l_r += l*r
l_var += l**2
r_var += r**2
# Assemble terms
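                    # Negate the correlation so that, like SAD/SSD, a lower cost means a better match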
cost_volume[d,y,x] = -l_r/np.sqrt(l_var*r_var)
return np.transpose(cost_volume, (1, 2, 0))
``` |
{
"source": "2buntu/2buntu-blog",
"score": 2
} |
#### File: twobuntu/categories/models.py
```python
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
"""
A grouping for articles of a similar topic.
"""
name = models.CharField(
max_length=40,
help_text="The name of the category.",
)
image = models.ImageField(
upload_to='categories',
blank=True,
null=True,
help_text="A representative image.",
)
def __str__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('categories:view', (), {
'id': self.id,
'slug': slugify(self.name),
})
class Meta:
ordering = ('name',)
verbose_name_plural = 'Categories'
```
#### File: twobuntu/categories/views.py
```python
from django.shortcuts import render
from twobuntu.articles.models import Article
from twobuntu.categories.models import Category
from twobuntu.decorators import canonical
@canonical(Category)
def view(request, category):
"""
Display articles filed under the specified category.
"""
return render(request, 'categories/view.html', {
'title': category.name,
'category': category,
'articles': Article.objects.select_related('author','author__profile','category').filter(category=category, status=Article.PUBLISHED),
})
``` |
{
"source": "2channelkrt/VLAE",
"score": 3
} |
#### File: 2channelkrt/VLAE/datasets.py
```python
import os
import urllib.request
import numpy as np
import torch
import torch.utils.data
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader, TensorDataset
from scipy.io import loadmat
num_workers = 4
lamb = 0.05
class MNIST():
def __init__(self, batch_size, binarize=False, logit_transform=False):
""" [-1, 1, 28, 28]
"""
self.binarize = binarize
self.logit_transform = logit_transform
directory='./datasets/MNIST'
if not os.path.exists(directory):
os.makedirs(directory)
kwargs = {'num_workers': num_workers, 'pin_memory': True} if torch.cuda.is_available() else {}
self.train_loader = DataLoader(
datasets.MNIST('./datasets/MNIST', train=True, download=True,
transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(
datasets.MNIST('./datasets/MNIST', train=False, transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=False, **kwargs)
self.dim = [1,28,28]
if self.binarize:
pass
else:
train = torch.stack([data for data, _ in
list(self.train_loader.dataset)], 0).cuda()
train = train.view(train.shape[0], -1)
if self.logit_transform:
train = train * 255.0
train = (train + torch.rand_like(train)) / 256.0
train = lamb + (1 - 2.0 * lamb) * train
train = torch.log(train) - torch.log(1.0 - train)
self.mean = train.mean(0)
self.logvar = torch.log(torch.mean((train - self.mean)**2)).unsqueeze(0)
def preprocess(self, x):
if self.binarize:
x = x.view([-1, np.prod(self.dim)])
return (torch.rand_like(x).cuda() < x).to(torch.float)
elif self.logit_transform:
# apply uniform noise and renormalize
x = x.view([-1, np.prod(self.dim)]) * 255.0
x = (x + torch.rand_like(x)) / 256.0
x = lamb + (1 - 2.0 * lamb) * x
x = torch.log(x) - torch.log(1.0 - x)
return x - self.mean
else:
return x.view([-1, np.prod(self.dim)]) - self.mean
def unpreprocess(self, x):
if self.binarize:
return x.view([-1] + self.dim)
elif self.logit_transform:
x = x + self.mean
x = torch.sigmoid(x)
x = (x - lamb) / (1.0 - 2.0 * lamb)
return x.view([-1] + self.dim)
else:
return (x + self.mean).view([-1] + self.dim)
class FashionMNIST():
def __init__(self, batch_size, binarize=False, logit_transform=False):
""" [-1, 1, 28, 28]
"""
if binarize:
raise NotImplementedError
self.logit_transform = logit_transform
directory='./datasets/FashionMNIST'
if not os.path.exists(directory):
os.makedirs(directory)
kwargs = {'num_workers': num_workers, 'pin_memory': True} if torch.cuda.is_available() else {}
self.train_loader = DataLoader(
datasets.FashionMNIST(directory, train=True, download=True,
transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(
datasets.FashionMNIST(directory, train=False, download=True, transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=False, **kwargs)
self.dim = [1,28,28]
train = torch.stack([data for data, _ in
list(self.train_loader.dataset)], 0).cuda()
train = train.view(train.shape[0], -1)
if self.logit_transform:
train = train * 255.0
train = (train + torch.rand_like(train)) / 256.0
train = lamb + (1 - 2.0 * lamb) * train
train = torch.log(train) - torch.log(1.0 - train)
self.mean = train.mean(0)
self.logvar = torch.log(torch.mean((train - self.mean)**2)).unsqueeze(0)
def preprocess(self, x):
if self.logit_transform:
# apply uniform noise and renormalize
x = x.view([-1, np.prod(self.dim)]) * 255.0
x = (x + torch.rand_like(x)) / 256.0
x = lamb + (1 - 2.0 * lamb) * x
x = torch.log(x) - torch.log(1.0 - x)
return x - self.mean
else:
return x.view([-1, np.prod(self.dim)]) - self.mean
def unpreprocess(self, x):
if self.logit_transform:
x = x + self.mean
x = torch.sigmoid(x)
x = (x - lamb) / (1.0 - 2.0 * lamb)
return x.view([-1] + self.dim)
else:
return (x + self.mean).view([-1] + self.dim)
class SVHN():
def __init__(self, batch_size, binarize=False, logit_transform=False):
""" [-1, 3, 32, 32]
"""
if binarize:
raise NotImplementedError
self.logit_transform = logit_transform
directory='./datasets/SVHN'
if not os.path.exists(directory):
os.makedirs(directory)
kwargs = {'num_workers': num_workers, 'pin_memory': True} if torch.cuda.is_available() else {}
self.train_loader = DataLoader(
datasets.SVHN(root=directory,split='train', download=True,
transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(
datasets.SVHN(root=directory, split='test', download=True, transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=False, **kwargs)
self.dim = [3, 32, 32]
train = torch.stack([data for data, _ in
list(self.train_loader.dataset)], 0).cuda()
train = train.view(train.shape[0], -1)
if self.logit_transform:
train = train * 255.0
train = (train + torch.rand_like(train)) / 256.0
train = lamb + (1 - 2.0 * lamb) * train
train = torch.log(train) - torch.log(1.0 - train)
self.mean = train.mean(0)
self.logvar = torch.log(torch.mean((train - self.mean)**2)).unsqueeze(0)
def preprocess(self, x):
if self.logit_transform:
# apply uniform noise and renormalize
x = x.view([-1, np.prod(self.dim)]) * 255.0
x = (x + torch.rand_like(x)) / 256.0
x = lamb + (1 - 2.0 * lamb) * x
x = torch.log(x) - torch.log(1.0 - x)
return x - self.mean
else:
return x.view([-1, np.prod(self.dim)]) - self.mean
def unpreprocess(self, x):
if self.logit_transform:
x = x + self.mean
x = torch.sigmoid(x)
x = (x - lamb) / (1.0 - 2.0 * lamb)
return x.view([-1] + self.dim)
else:
return (x + self.mean).view([-1] + self.dim)
class CIFAR10():
def __init__(self, batch_size, binarize=False, logit_transform=False):
""" [-1, 3, 32, 32]
"""
if binarize:
raise NotImplementedError
self.logit_transform = logit_transform
directory='./datasets/CIFAR10'
if not os.path.exists(directory):
os.makedirs(directory)
kwargs = {'num_workers': num_workers, 'pin_memory': True} if torch.cuda.is_available() else {}
self.train_loader = DataLoader(
datasets.CIFAR10(root=directory, train=True, download=True,
transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(
datasets.CIFAR10(root=directory, train=False, transform=transforms.ToTensor()),
batch_size=batch_size, shuffle=False, **kwargs)
self.dim = [3, 32, 32]
train = torch.stack([data for data, _ in
list(self.train_loader.dataset)], 0).cuda()
train = train.view(train.shape[0], -1)
if self.logit_transform:
train = train * 255.0
train = (train + torch.rand_like(train)) / 256.0
train = lamb + (1 - 2.0 * lamb) * train
train = torch.log(train) - torch.log(1.0 - train)
self.mean = train.mean(0)
self.logvar = torch.log(torch.mean((train - self.mean)**2)).unsqueeze(0)
def preprocess(self, x):
if self.logit_transform:
# apply uniform noise and renormalize
x = x.view([-1, np.prod(self.dim)]) * 255.0
x = (x + torch.rand_like(x)) / 256.0
x = lamb + (1 - 2.0 * lamb) * x
x = torch.log(x) - torch.log(1.0 - x)
return x - self.mean
else:
return x.view([-1, np.prod(self.dim)]) - self.mean
def unpreprocess(self, x):
if self.logit_transform:
x = x + self.mean
x = torch.sigmoid(x)
x = (x - lamb) / (1.0 - 2.0 * lamb)
return x.view([-1] + self.dim)
else:
return (x + self.mean).view([-1] + self.dim)
class OMNIGLOT(Dataset):
def __init__(self, batch_size, binarize=False, logit_transform=False):
""" [ -1, 1, 28, 28]
"""
if binarize:
raise NotImplementedError
self.logit_transform = logit_transform
directory='./datasets/OMNIGLOT'
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(os.path.join(directory, 'chardata.mat')):
            print('Downloading Omniglot chardata.mat...')
urllib.request.urlretrieve('https://github.com/yburda/iwae/raw/master/datasets/OMNIGLOT/chardata.mat',
os.path.join(directory, 'chardata.mat'))
data = loadmat(os.path.join(directory, 'chardata.mat'))
# between 0~1.
train = data['data'].swapaxes(0,1).reshape((-1, 1, 28, 28)).astype('float32')
test = data['testdata'].swapaxes(0,1).reshape((-1, 1, 28, 28)).astype('float32')
train_labels = np.zeros(train.shape[0])
test_labels = np.zeros(test.shape[0])
train_dataset = TensorDataset(torch.from_numpy(train), torch.from_numpy(train_labels))
test_dataset = TensorDataset(torch.from_numpy(test), torch.from_numpy(test_labels))
kwargs = {'num_workers': num_workers, 'pin_memory': True} if torch.cuda.is_available() else {}
self.train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
self.test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
self.dim = [1, 28, 28]
train = torch.stack([data for data, _ in
list(self.train_loader.dataset)], 0).cuda()
train = train.view(train.shape[0], -1)
if self.logit_transform:
train = train * 255.0
train = (train + torch.rand_like(train)) / 256.0
train = lamb + (1 - 2.0 * lamb) * train
train = torch.log(train) - torch.log(1.0 - train)
self.mean = train.mean(0)
self.logvar = torch.log(torch.mean((train - self.mean)**2)).unsqueeze(0)
def preprocess(self, x):
if self.logit_transform:
# apply uniform noise and renormalize
x = x.view([-1, np.prod(self.dim)]) * 255.0
x = (x + torch.rand_like(x)) / 256.0
x = lamb + (1 - 2.0 * lamb) * x
x = torch.log(x) - torch.log(1.0 - x)
return x - self.mean
else:
return x.view([-1, np.prod(self.dim)]) - self.mean
def unpreprocess(self, x):
if self.logit_transform:
x = x + self.mean
x = torch.sigmoid(x)
x = (x - lamb) / (1.0 - 2.0 * lamb)
return x.view([-1] + self.dim)
else:
return (x + self.mean).view([-1] + self.dim)
```
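The dataset classes above all share the same dequantize-then-logit preprocessing (with `lamb = 0.05`) followed by mean-centering; the sketch below (not from the repository) isolates the forward and inverse maps to show they invert each other exactly, leaving out the mean subtraction:
```python
import torch

lamb = 0.05
x = torch.rand(4, 784)                               # stand-in for flattened pixels in [0, 1]
noisy = (x * 255.0 + torch.rand_like(x)) / 256.0     # uniform dequantization
squeezed = lamb + (1.0 - 2.0 * lamb) * noisy         # keep values away from 0 and 1
z = torch.log(squeezed) - torch.log(1.0 - squeezed)  # logit transform (what preprocess computes)

# Inverse map, as in unpreprocess (minus re-adding the training mean):
back = (torch.sigmoid(z) - lamb) / (1.0 - 2.0 * lamb)
assert torch.allclose(back, noisy, atol=1e-5)
```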
#### File: 2channelkrt/VLAE/distribution.py
```python
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import utils
class Bernoulli():
def __init__(self, mu):
self.mu = mu
def log_probability(self, x):
self.mu = torch.clamp(self.mu, min=1e-5, max=1.0 - 1e-5)
return (x * torch.log(self.mu) + (1.0 - x) * torch.log(1 - self.mu)).sum(1)
def sample(self):
return (torch.rand_like(self.mu).to(device=self.mu.device) < self.mu).to(torch.float)
class DiagonalGaussian():
def __init__(self, mu, logvar):
self.mu = mu
self.logvar = logvar
def log_probability(self, x):
return -0.5 * torch.sum(np.log(2.0*np.pi) + self.logvar + ((x - self.mu)**2)
/ torch.exp(self.logvar), dim=1)
def sample(self):
eps = torch.randn_like(self.mu)
return self.mu + torch.exp(0.5 * self.logvar) * eps
def repeat(self, n):
mu = self.mu.unsqueeze(1).repeat(1, n, 1).view(-1, self.mu.shape[-1])
logvar = self.logvar.unsqueeze(1).repeat(1, n, 1).view(-1, self.logvar.shape[-1])
return DiagonalGaussian(mu, logvar)
@staticmethod
def kl_div(p, q):
return 0.5 * torch.sum(q.logvar - p.logvar - 1.0 + (torch.exp(p.logvar) + (p.mu - q.mu)**2)/(torch.exp(q.logvar)), dim=1)
class Gaussian():
def __init__(self, mu, precision):
# mu: [batch_size, z_dim]
self.mu = mu
# precision: [batch_size, z_dim, z_dim]
self.precision = precision
# TODO: get rid of the inverse for efficiency
self.L = torch.cholesky(torch.inverse(precision))
self.dim = self.mu.shape[1]
def log_probability(self, x):
indices = np.arange(self.L.shape[-1])
return -0.5 * (self.dim * np.log(2.0*np.pi)
+ 2.0 * torch.log(self.L[:, indices, indices]).sum(1)
+ torch.matmul(torch.matmul((x - self.mu).unsqueeze(1), self.precision),
(x - self.mu).unsqueeze(-1)).sum([1, 2]))
def sample(self):
eps = torch.randn_like(self.mu)
return self.mu + torch.matmul(self.L, eps.unsqueeze(-1)).squeeze(-1)
def repeat(self, n):
mu = self.mu.unsqueeze(1).repeat(1, n, 1).view(-1, self.mu.shape[-1])
precision = self.precision.unsqueeze(1).repeat(1, n, 1, 1).view(-1, *self.precision.shape[1:])
return Gaussian(mu, precision)
```
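A quick consistency check (my addition, not part of the repository): `DiagonalGaussian` should agree with `torch.distributions.Normal` for a factorized Gaussian; the import assumes the file above is importable as `distribution`:
```python
import torch
from torch.distributions import Normal, kl_divergence
from distribution import DiagonalGaussian  # assumed module name, matching the file above

mu, logvar = torch.randn(8, 3), torch.randn(8, 3)
q_mu, q_logvar = torch.randn(8, 3), torch.randn(8, 3)
x = torch.randn(8, 3)

p, q = DiagonalGaussian(mu, logvar), DiagonalGaussian(q_mu, q_logvar)
ref_p = Normal(mu, torch.exp(0.5 * logvar))
ref_q = Normal(q_mu, torch.exp(0.5 * q_logvar))

# log densities summed over the last dimension should match
assert torch.allclose(p.log_probability(x), ref_p.log_prob(x).sum(-1), atol=1e-5)
# analytic KL(p || q) should match torch.distributions
assert torch.allclose(DiagonalGaussian.kl_div(p, q),
                      kl_divergence(ref_p, ref_q).sum(-1), atol=1e-5)
```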
#### File: 2channelkrt/VLAE/utils.py
```python
import torch
def clip_grad(gradient, clip_value):
""" clip between clip_min and clip_max
"""
return torch.clamp(gradient, min=-clip_value, max=clip_value)
def clip_grad_norm(gradient, clip_value):
norm = (gradient**2).sum(-1)
divisor = torch.max(torch.ones_like(norm).cuda(), norm / clip_value)
return gradient / divisor.unsqueeze(-1)
``` |
{
"source": "2chips/PixivBiu",
"score": 2
} |
#### File: PixivBiu/app/platform.py
```python
import traceback
import json
import yaml
import sys
import os
ENVIRON = {"ROOTPATH": os.path.split(os.path.realpath(sys.argv[0]))[0] + "/"}
class CMDProcessor(object):
PLUGINS = {}
CORES_LIST = []
def process(self, cmd):
if not cmd in self.PLUGINS.keys():
return {"code": 0, "msg": "no method"}
for x in self.CORES_LIST:
f = getattr(self, x)()
setattr(self, x, f)
self.ENVIRON = ENVIRON
try:
r = self.PLUGINS[cmd](self).pRun(cmd)
return r
except Exception as e:
print("[system] Plugin \033[1;37;46m %s \033[0m failed to run" % cmd)
print("\033[31m[ERROR] %s\033[0m" % e)
print("\033[31m%s\033[0m" % traceback.format_exc())
return {"code": 0, "msg": "plugin error"}
@classmethod
def plugin_register(cls, plugin_name):
def wrapper(plugin):
cls.PLUGINS.update({plugin_name: plugin})
return plugin
return wrapper
@classmethod
def core_register(cls, core_name):
def wrapper(core):
setattr(cls, core_name, core)
cls.CORES_LIST.append(core_name)
return core
return wrapper
@classmethod
def core_register_auto(cls, core_name, loads={}):
info = {"ENVIRON": ENVIRON}
for x in loads:
info[x] = cls.loadSet(loads[x])
def wrapper(core):
try:
setattr(cls, core_name, core(info).auto())
except Exception as e:
print(
"[system] Core \033[1;37;46m %s \033[0m failed to load" % core_name
)
print("\033[31m[ERROR] %s\033[0m" % e)
print("\033[31m%s\033[0m" % traceback.format_exc())
return core
return wrapper
@staticmethod
def getEnv():
return ENVIRON
@staticmethod
def loadSet(uri):
uri = uri.replace("{ROOTPATH}", ENVIRON["ROOTPATH"])
try:
with open(uri, "r", encoding="UTF-8") as c:
sfx = uri.split(".")[-1]
if sfx == "json":
return json.load(c)
elif sfx == "yml" or sfx == "yaml":
return yaml.safe_load(c)
else:
return c
except Exception as e:
print("[system] \033[1;37;46m %s \033[0m failed to load" % uri)
print("\033[31m[ERROR] %s\033[0m" % e)
print("\033[31m%s\033[0m" % traceback.format_exc())
return None
```
#### File: biu/do/unfollow.py
```python
from ....platform import CMDProcessor
@CMDProcessor.plugin_register("api/biu/do/unfollow")
class doUnFollow(object):
def __init__(self, MOD):
self.MOD = MOD
def pRun(self, cmd):
if self.MOD.biu.apiType != "public":
return {"code": 0, "msg": "only support public api"}
try:
args = self.MOD.args.getArgs(
"unfollow",
[
"userID",
(
"restrict=%s"
% self.MOD.biu.sets["biu"]["common"]["defaultActionType"]
),
],
)
except:
return {"code": 0, "msg": "missing parameters"}
return {
"code": 1,
"msg": {
"way": "do",
"args": args,
"rst": self.unFollow(args["ops"].copy(), args["fun"].copy()),
},
}
def unFollow(self, opsArg, funArg):
self.MOD.args.argsPurer(
funArg, {"userID": "user_ids", "restrict": "publicity"}
)
r = self.MOD.biu.api.me_favorite_users_unfollow(**funArg)
return {"api": "public", "data": r}
``` |
{
"source": "2Clutch/magic",
"score": 3
} |
#### File: magic/ch_1/test_flat.py
```python
from ch_1.flat import Flat
def test_flat():
    assert Flat().flat(sample_list=[1, 2, [3]]) == [1, 2, 3]
    assert Flat().flat(sample_list=[1, 2, [3], []]) == [1, 2, 3]
    assert Flat().flat(sample_list=[1, 2, [3], [3, 4, 5]]) == [1, 2, 3, 3, 4, 5]
    assert Flat().flat(sample_list=[1, 2, [3], [7, [9, 2, 5], 4, 3, 2]]) == [1, 2, 3, 7, 9, 2, 5, 4, 3, 2]
``` |
{
"source": "2Clutch/mlh_workshop_challenge",
"score": 3
} |
#### File: 2Clutch/mlh_workshop_challenge/search.py
```python
import GetOldTweets3 as got
from pprint import pprint as pp
class TwitterScraper:
def __init__(self):
self.user = 'pascivite'
self.count = 10
self.tweet_criteria = None
self.tweets = None
def scrape_latest_tweets(self):
"""
Retrieve latest tweets from a given user
Args:
user (string): twitter username
count (int): number of tweets to retrieve
:return:
list of tweets and additional relevant information
"""
# Creation of query object
self.tweet_criteria = got.manager.TweetCriteria().setUsername(self.user).setMaxTweets(self.count)
# Creation of list that contains all tweets
self.tweets = got.manager.TweetManager.getTweets(self.tweet_criteria)
return self.tweets
if __name__ == '__main__':
test = TwitterScraper()
tweets = test.scrape_latest_tweets()
for i in range(1):
pp(tweets[0].__dict__)
``` |
{
"source": "2Clutch/vistar_coding_challenge",
"score": 3
} |
#### File: 2Clutch/vistar_coding_challenge/state-server.py
```python
import json
import argparse
from flask import Flask
from shapely.geometry import Polygon as pol, Point as p
app = Flask(__name__)
with open("states.json") as f:
data = json.load(f)
@app.route('/', methods=['GET', 'POST'])
def search(cord):
parser = argparse.ArgumentParser()
lon = parser.add_argument("longitude", type=int)
lat = parser.add_argument("latitude", type=int)
args = parser.parse_args()
if cord:
point = p(cord[args[lon]], cord[args[lat]])
for state in data:
polygon = pol(state['border'])
if polygon.contains(point):
return state['state']
if __name__ == '__main__':
app.run()
``` |
{
"source": "2cracer2/QQchatlog_Analysis",
"score": 3
} |
#### File: chatlog/analysis/interesting.py
```python
import pandas as pd
from pymongo import MongoClient
class Interesting(object):
def __init__(self):
        self.client = MongoClient()  # connects to localhost:27017 by default
self.db = self.client.chatlog
self.post = self.db.vczh
def longest_name(self):
"""
        Collect every user's display name and sort the names.
        ..note:: The chat log spans a long period, so some users changed their name frequently.
        :return: top_list[('name', len(name)), ...] sorted by name length, descending
"""
res_list = []
for doc in self.post.find({}, {'name': 1}):
res_list.append(doc['name'])
res_list = {}.fromkeys(res_list).keys()
top_list = []
for li in res_list:
top_list.append((li, len(li)))
return pd.DataFrame(data=sorted(top_list, key=lambda x: x[1], reverse=True), columns=['马甲名', '字符数']).head(10)
def longest_formation(self):
"""
        The longest "formation" (identical messages repeated in sequence) in the whole log.
        :return: top_list[('text', len(text)), ...] sorted by chain length, descending
"""
res_list = []
for doc in self.post.find({}, {'text': 1}):
res_list.append(doc['text'])
top_list = []
        # text is stored as [[sentence1], [sentence2], ...]; formations are mostly a single sentence, so only length-1 text is considered
i = 0
while i < len(res_list) - 1:
if res_list[i][0] == '[图片]':
i += 1
elif res_list[i][0] == res_list[i + 1][0]:
pos = i + 1
while pos < len(res_list) - 1:
if res_list[pos][0] == res_list[pos + 1][0]:
pos += 1
else:
if pos - i + 1 > 2:
top_list.append((res_list[i][0], pos - i + 1))
i = pos + 1
break
else:
i += 1
        # If a formation was interrupted by a single interjection, merge the broken segments back together
k = 0
while k < len(top_list) - 1:
            if top_list[k][0] == top_list[k + 1][0]:
                # merge adjacent entries for the same text in place
                top_list[k] = (top_list[k][0], top_list[k][1] + top_list[k + 1][1])
                top_list.pop(k + 1)
            else:
                k += 1
return pd.DataFrame(data =sorted(top_list, key=lambda x: x[1], reverse=True),columns=['整齐的队形','次数'])
    # Return the users who posted banned words
def get_senstive_words(self):
self.post = self.db.senstive_words
sw_list = list(self.post.find({}, {'_id': 0,'name': 1, 'time': 1, 'text':1}))
return pd.DataFrame(sw_list).sort_values(by=['time'])
def close(self):
self.client.close()
```
#### File: chatlog/base/read_chatlog.py
```python
import re
from pymongo import MongoClient
from chatlog.base import constant
class ReadChatlog(object):
def __init__(self, file_path, db_name='chatlog', collection_name='vczh'):
self.file_path = file_path
        self.client = MongoClient()  # connects to localhost:27017 by default
self.db = self.client[db_name]
self.post = self.db[collection_name]
        # Initialise the two regexes used below
self.time_pattern = re.compile(constant.JUDGE_TIME_RE)
self.ID_pattern = re.compile(constant.JUDGE_ID_RE)
def _judge_start_line(self, message):
"""
        Determine whether a line is the start of a chat record.
        Condition 1: starts with YYYY-MM-DD HH-MM-SS (length greater than 19)
        Condition 2: ends with (XXXXXXXXX) or <[email protected]>
:return: False or (time,ID)
"""
if len(message) > 19 and (self.time_pattern.match(message)) and (self.ID_pattern.search(message)):
return self.time_pattern.search(message).group(), self.ID_pattern.search(message).group()
return False
def work(self):
"""
        The chat log exported by Tencent QQ is UTF-8 with BOM; convert it to UTF-8 without BOM by hand first.
        Cleans the raw data, splits it into records and stores them in MongoDB.
        ..note:: Example record
        time:YYYY-MM-DD HH-MM-SS
        ID:(XXXXXXXXX) or <xxx@xxx.<EMAIL>>
name:username
text:['sentence1','sentence2',...]
"""
print('----------正在进行数据清洗-------------')
with open(self.file_path, 'r', encoding='utf-8') as chatlog_file:
chatlog_list = [line.strip() for line in chatlog_file if line.strip() != ""]
        now_cursor = 0  # current position in the log
        last = 0  # position of the previous record's header line
flag = 0
first_line_info = self._judge_start_line(str(chatlog_list[now_cursor]))
while now_cursor < len(chatlog_list):
if self._judge_start_line(str(chatlog_list[now_cursor])):
if not flag:
first_line_info = self._judge_start_line(str(chatlog_list[now_cursor]))
last = now_cursor
flag = 1
else:
flag = 0
send_time = first_line_info[0]
send_id = first_line_info[1]
                    # Skip insertion entirely if the record contains no message
if not chatlog_list[last + 1:now_cursor]:
continue
                    # Extract the display name the user had when sending this message
name = chatlog_list[last].replace(send_id, "").replace(send_time, "").lstrip()
for extra_char in '()<>':
send_id = send_id.replace(extra_char, "")
                    # Level tags are largely missing, so strip them outright
for i in ['【潜水】', '【冒泡】', '【彩彩】', '【群地位倒数】', '【群主】', '【管理员】', '【吐槽】']:
if name[:len(i)] == i:
name = name.replace(i, "")
                    # Unify the time format (zero-pad single-digit hours)
for li in '0123456789':
send_time = send_time.replace(' ' + li + ':', ' 0' + li + ':')
                    # Insert the formatted record into the collection
self.post.insert_one({'time': send_time, 'ID': send_id, 'name': name,
'text': chatlog_list[last + 1:now_cursor]})
print('time:', send_time, 'ID:', send_id, 'name:', name)
print(chatlog_list[last + 1:now_cursor])
print("------------------------------------------------")
continue
now_cursor += 1
self.client.close()
print('----------数据清洗完成-------------')
```
#### File: chatlog/base/user_profile.py
```python
from datetime import datetime
from pymongo import MongoClient
class UserProfile:
def __init__(self, db_name='chatlog', collection_name='vczh'):
print("正在初始化用户画像模块")
        self.client = MongoClient()  # connects to localhost:27017 by default
self.db = self.client[db_name]
self.post = self.db[collection_name]
self.res_list = [doc for doc in self.post.find({}, {'_id': 0})]
def close(self):
self.client.close()
def _get_user_id_list(self):
"""
        Get the list of all user IDs present in the log
:return:[id1,id2,id3,...]
"""
user_id_list = [li['ID'] for li in self.res_list]
user_id_list = list(set(user_id_list))
print('记录中共有', len(user_id_list), '位聚聚发过言')
return user_id_list
def _get_all_name(self, user_id):
"""
        Return all names a user has ever used, looked up by ID
        :param user_id: the user's ID
:return:{'name1','name2',...}
"""
name_list = set()
for li in self.res_list:
if li['ID'] == user_id:
name_list.add(li['name'])
return list(name_list)
def _get_speak_infos(self, user_id):
"""
        Return a user's number of messages, number of characters sent, and number of images sent
        :param user_id: the user's ID
:return:[speak_num,word_num,photo_num]
"""
speak_num = 0
word_num = 0
photo_num = 0
for li in self.res_list:
if li['ID'] == user_id:
speak_num += 1
for sp in li['text']:
word_num += len(sp)
photo_num += sp.count('[图片]')
return speak_num, word_num, photo_num
def _get_online_time(self, user_id):
"""
        Return in which time slots a user posts the most, by hour of day (0-24) and weekday (Mon-Sun)
        :param user_id: the user's ID
        :return: a 7x24 nested list, indexed [weekday (Mon-Sun)][hour (0-23)]
"""
time_list = []
for li in self.res_list:
if li['ID'] == user_id:
time_list.append(li['time'])
week_list = [[0 for _ in range(24)] for _ in range(7)]
for li in time_list:
week_list[int(datetime.strptime(li, "%Y-%m-%d %H:%M:%S").weekday())][int(li[11:13])] += 1
return week_list
def work(self):
"""
        Build the basic profile of every user and store it in the database
        ..note::
        ID:
        name:[name1,name2,...]
        speak_num: number of messages sent
        word_num: number of characters sent
        photo_num: number of images posted
        week_online: weekly activity distribution
        ban_time: accumulated mute time
:return:None
"""
post = self.db.profile
user_id_list = self._get_user_id_list()
for li in user_id_list:
print('正在构建用户', li, '的用户画像')
name_list = self._get_all_name(li)
speak_num, word_num, photo_num = self._get_speak_infos(li)
week_online = self._get_online_time(li)
ban_time = self._ban_time(li)
post.insert_one({'ID': li, 'name_list': name_list, 'speak_num': speak_num,
'word_num': word_num, 'photo_num': photo_num,
'week_online': week_online, 'ban_time': ban_time})
self.close()
    # TODO: subtract time when an admin lifts a mute early
def _ban_time(self, user_id):
"""
        Total up the user's accumulated mute (ban) time, in minutes
:return:
"""
def add_time(add_list):
time = 0
for times in add_list:
for info in [('天', 60 * 24), ('小时', 60), ('分钟', 1)]:
if info[0] in times:
index = times.find(info[0])
if times[index - 2].isdigit():
time += int(times[index - 2:index]) * info[1]
else:
time += int(times[index - 1:index]) * info[1]
return time
name_list = self._get_all_name(user_id)
res_list = []
for li in self.post.find({'ID': '10000'}, {'text': 1}):
if '被管理员禁言' in li['text'][0]:
res_list.append(li['text'][0].split(' 被管理员禁言'))
time_list = []
for li in res_list:
for name in name_list:
if li[0] == name:
time_list.append(li[1])
return add_time(time_list)
```
#### File: chatlog/visualization/word_img.py
```python
import sys
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from pymongo import MongoClient
from wordcloud import WordCloud, ImageColorGenerator
from chatlog.base.seg_word import SegWord
class WordImg(object):
def __init__(self):
self.client = MongoClient() # 默认连接 localhost 27017
self.db = self.client.chatlog
self.post = self.db.word
def close(self):
self.client.close()
def draw_wordcloud(self, word_dict, name):
cat_mask = np.array(Image.open('./visualization/cat.png'))
wc = WordCloud(font_path='./visualization/msyh.ttc',
width=800, height=400,
background_color="white", # 背景颜色
mask=cat_mask, # 设置背景图片
min_font_size=6
)
wc.fit_words(word_dict)
image_colors = ImageColorGenerator(cat_mask)
# recolor wordcloud and show
# we could also give color_func=image_colors directly in the constructor
plt.imshow(wc)
plt.axis("off")
plt.savefig('./img/' + name + '.png', dpi=800)
plt.close()
def PL_wordcloud(self):
word_dict = {'JAVA': ['java', 'jawa'], 'C++': ['c++', 'c艹'], 'C': ['c', 'c语言'],
'PHP': ['php'], 'Python': ['py', 'python'], 'C#': ['c#']}
self.draw_wordcloud(self.word_fre(word_dict), sys._getframe().f_code.co_name)
def all_wordcloud(self, word_len=0):
word_dict = {}
stop_word = ['图片', '表情', '说','[]','在','的','[',']','都']
for doc in self.post.find({}):
if len(doc['word']) > word_len and doc['word'] not in stop_word:
word_dict[doc['word']] = doc['item']
self.draw_wordcloud(word_dict, sys._getframe().f_code.co_name + str(word_len))
def company_wordcloud(self):
word_dict = {'Microsoft': ['微软', '巨硬', 'ms', 'microsoft'], 'Tencent': ['腾讯', 'tencent', '鹅厂'],
'360': ['360', '安全卫士', '奇虎'], 'Netease': ['netease', '网易', '猪场'],
'JD': ['jd', '京东', '某东', '狗东'], 'Taobao': ['淘宝', '天猫', 'taobao'],
'BaiDu': ['百度', '某度', 'baidu'], 'ZhiHu': ['zhihu', '知乎', '你乎', '某乎'],
'Sina': ['新浪', 'sina', '微博', 'weibo']}
self.draw_wordcloud(self.word_fre(word_dict), sys._getframe().f_code.co_name)
def word_fre(self, word_dict):
word_fre = {}
for key in word_dict.keys():
word_fre[key] = 0
res_dict = {}
for doc in self.post.find({}):
res_dict[doc['word']] = doc['item']
for res_key in res_dict.keys():
for word_key in word_dict.keys():
if str(res_key).lower() in word_dict[word_key]:
word_fre[word_key] = word_fre[word_key] + res_dict[res_key]
return word_fre
def longest_formation_wordcloud(self):
top_words = self.top_words()
top_words = top_words[3:110]
word_dict = {}
        # TODO: extract the top words by frequency together with their counts
        for x in top_words:
            word_dict[x[0]] = int(x[1])
cfc = np.array(Image.open('./visualization/cat2.png'))
wc = WordCloud(font_path='./visualization/msyh.ttc',
width=1080, height=720,
mask=cfc,
background_color="white", # 背景颜色
min_font_size=6
)
wc.fit_words(word_dict)
plt.imshow(wc)
plt.axis("off")
plt.savefig('./img/' + sys._getframe().f_code.co_name + '.png', dpi=800)
plt.show()
plt.close()
def work(self):
self.PL_wordcloud()
self.company_wordcloud()
self.all_wordcloud()
self.longest_formation_wordcloud()
self.close()
    # TODO: extract the top words by frequency together with their counts
def top_words(self):
self.post = self.db.word
top_list = []
for doc in self.post.find({}, {'word': 1, 'item': 1, }):
top_list.append((doc['word'], doc['item']))
return sorted(top_list, key=lambda x: x[1], reverse=True)
``` |
{
"source": "2Cubed/ProjectEuler",
"score": 3
} |
#### File: ProjectEuler/euler/__init__.py
```python
from importlib import import_module
from os import listdir
from os.path import abspath, dirname
from re import match
SOLVED = set(
int(m.group(1))
for f in listdir(abspath(dirname(__file__)))
for m in (match(r"^p(\d{3})\.py$", f),) if m
)
def compute(problem: int):
"""Compute the answer to problem `problem`."""
assert problem in SOLVED, "Problem currently unsolved."
module = import_module("euler.p{:03d}".format(problem))
return module.compute()
```
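A brief usage sketch for the dispatcher above (not from the repository), assuming the package is importable as `euler`:
```python
from euler import SOLVED, compute

print(sorted(SOLVED))   # problem numbers that have a pNNN.py module
print(compute(1))       # imports euler.p001 and returns its compute() result
```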
#### File: ProjectEuler/euler/p001.py
```python
NUMBERS = 3, 5
MAXIMUM = 1000
def compute(*numbers, maximum=MAXIMUM):
"""Compute the sum of the multiples of `numbers` below `maximum`."""
if not numbers:
numbers = NUMBERS
multiples = tuple(set(range(0, maximum, number)) for number in numbers)
return sum(set().union(*multiples))
```
#### File: ProjectEuler/euler/p004.py
```python
DIGITS = 3
def compute(digits=DIGITS):
"""Find the largest palindromic number made from the product of two numbers
of lengths `digits`.
"""
values = list()
for num1 in range(10**digits, 10**(digits-1), -1):
for num2 in range(10**digits, 10**(digits-1), -1):
product = num1 * num2
if str(product) == str(product)[::-1]:
values.append(product)
return max(values)
```
#### File: ProjectEuler/euler/p009.py
```python
ABC_SUM = 1000
def compute(abc_sum=ABC_SUM):
"""Compute the product *abc* of the first Pythagorean triplet a²+b²=c² for
which the sum of a, b, and c is equal to `abc_sum`.
"""
for c_value in range(abc_sum):
for a_value in range(abc_sum - c_value):
if a_value**2 + (abc_sum - c_value - a_value)**2 == c_value**2:
return a_value * (abc_sum - c_value - a_value) * c_value
```
#### File: ProjectEuler/euler/p022.py
```python
from string import ascii_uppercase
LETTER_MAP = dict((v, k) for k, v in enumerate(ascii_uppercase, 1))
FILENAME = "resources/p022_names.txt"
def compute(filename=FILENAME):
"""Compute the total of all "name scores" (see problem) in `filename`."""
names = sorted(open(filename, 'r').read()[1:-1].split(r'","'))
return sum(
position*sum(LETTER_MAP[letter] for letter in name)
for position, name in enumerate(names, 1)
)
``` |
{
"source": "2dadsgn/smart-vase-sensor-raspberry",
"score": 4
} |
#### File: 2dadsgn/smart-vase-sensor-raspberry/DHT11.py
```python
import RPi.GPIO as GPIO
import time
import Freenove_DHT as DHT
DHTPin = 11 #define the pin of DHT11
def loop():
dht = DHT.DHT(DHTPin) #create a DHT class object
sumCnt = 0 #number of reading times
while(True):
sumCnt += 1 #counting number of reading times
chk = dht.readDHT11() #read DHT11 and get a return value. Then determine whether data read is normal according to the return value.
print ("The sumCnt is : %d, \t chk : %d"%(sumCnt,chk))
if (chk is dht.DHTLIB_OK): #read DHT11 and get a return value. Then determine whether data read is normal according to the return value.
print("DHT11,OK!")
elif(chk is dht.DHTLIB_ERROR_CHECKSUM): #data check has errors
print("DHTLIB_ERROR_CHECKSUM!!")
elif(chk is dht.DHTLIB_ERROR_TIMEOUT): #reading DHT times out
print("DHTLIB_ERROR_TIMEOUT!")
else: #other errors
print("Other error!")
print("Humidity : %.2f, \t Temperature : %.2f \n"%(dht.humidity,dht.temperature))
time.sleep(2)
if __name__ == '__main__':
print ('Program is starting ... ')
try:
loop()
except KeyboardInterrupt:
GPIO.cleanup()
exit()
``` |
{
"source": "2dadsgn/Smart-vase-webapp-flask-",
"score": 3
} |
#### File: site-packages/bson/son.py
```python
import copy
import re
from bson.py3compat import abc, iteritems
# This sort of sucks, but seems to be as good as it gets...
# This is essentially the same as re._pattern_type
RE_TYPE = type(re.compile(""))
class SON(dict):
"""SON data.
A subclass of dict that maintains ordering of keys and provides a
few extra niceties for dealing with SON. SON provides an API
similar to collections.OrderedDict from Python 2.7+.
"""
def __init__(self, data=None, **kwargs):
self.__keys = []
dict.__init__(self)
self.update(data)
self.update(kwargs)
def __new__(cls, *args, **kwargs):
instance = super(SON, cls).__new__(cls, *args, **kwargs)
instance.__keys = []
return instance
def __repr__(self):
result = []
for key in self.__keys:
result.append("(%r, %r)" % (key, self[key]))
return "SON([%s])" % ", ".join(result)
def __setitem__(self, key, value):
if key not in self.__keys:
self.__keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self.__keys.remove(key)
dict.__delitem__(self, key)
def keys(self):
return list(self.__keys)
def copy(self):
other = SON()
other.update(self)
return other
# TODO this is all from UserDict.DictMixin. it could probably be made more
# efficient.
# second level definitions support higher levels
def __iter__(self):
for k in self.__keys:
yield k
def has_key(self, key):
return key in self.__keys
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return [(key, self[key]) for key in self]
def clear(self):
self.__keys = []
super(SON, self).clear()
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError("pop expected at most 2 arguments, got "\
+ repr(1 + len(args)))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = next(self.iteritems())
except StopIteration:
raise KeyError('container is empty')
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __eq__(self, other):
"""Comparison to another SON is order-sensitive while comparison to a
regular dictionary is order-insensitive.
"""
if isinstance(other, SON):
return len(self) == len(other) and self.items() == other.items()
return self.to_dict() == other
def __ne__(self, other):
return not self == other
def __len__(self):
return len(self.__keys)
def to_dict(self):
"""Convert a SON document to a normal Python dictionary instance.
This is trickier than just *dict(...)* because it needs to be
recursive.
"""
def transform_value(value):
if isinstance(value, list):
return [transform_value(v) for v in value]
elif isinstance(value, abc.Mapping):
return dict([
(k, transform_value(v))
for k, v in iteritems(value)])
else:
return value
return transform_value(dict(self))
def __deepcopy__(self, memo):
out = SON()
val_id = id(self)
if val_id in memo:
return memo.get(val_id)
memo[val_id] = out
for k, v in self.iteritems():
if not isinstance(v, RE_TYPE):
v = copy.deepcopy(v, memo)
out[k] = v
return out
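
# Illustrative usage sketch (added for clarity; not part of the vendored module):
if __name__ == '__main__':  # pragma: no cover
    doc = SON([('b', 2), ('a', 1)])
    print(doc.keys())     # ['b', 'a'] -- insertion order is preserved
    print(doc.to_dict())  # plain dict copy of the same data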
```
#### File: _vendor/pytoml/test.py
```python
import datetime
from .utils import format_rfc3339
try:
_string_types = (str, unicode)
_int_types = (int, long)
except NameError:
_string_types = str
_int_types = int
def translate_to_test(v):
if isinstance(v, dict):
return { k: translate_to_test(v) for k, v in v.items() }
if isinstance(v, list):
a = [translate_to_test(x) for x in v]
if v and isinstance(v[0], dict):
return a
else:
return {'type': 'array', 'value': a}
if isinstance(v, datetime.datetime):
return {'type': 'datetime', 'value': format_rfc3339(v)}
if isinstance(v, bool):
return {'type': 'bool', 'value': 'true' if v else 'false'}
if isinstance(v, _int_types):
return {'type': 'integer', 'value': str(v)}
if isinstance(v, float):
return {'type': 'float', 'value': '{:.17}'.format(v)}
if isinstance(v, _string_types):
return {'type': 'string', 'value': v}
raise RuntimeError('unexpected value: {!r}'.format(v))
```
#### File: site-packages/setuptools/unicode_utils.py
```python
import re
import sys
import unicodedata
from setuptools.extern import six
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, six.text_type):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
def filesys_decode(path):
"""
Ensure that the given path is decoded,
NONE when no expected encoding works
"""
if isinstance(path, six.text_type):
return path
fs_enc = sys.getfilesystemencoding() or 'utf-8'
candidates = fs_enc, 'utf-8'
for enc in candidates:
try:
return path.decode(enc)
except UnicodeDecodeError:
continue
def try_encode(string, enc):
"turn unicode encoding into a functional routine"
try:
return string.encode(enc)
except UnicodeEncodeError:
return None
CODING_RE = re.compile(br'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
def detect_encoding(fp):
first_line = fp.readline()
fp.seek(0)
m = CODING_RE.match(first_line)
if m is None:
return None
return m.group(1).decode('ascii')
``` |
{
"source": "2daimehorisota/ros-test",
"score": 3
} |
#### File: ros-test/scripts/100times.py
```python
import rospy
from std_msgs.msg import Int32
f = 0
def cb(message):
global f
f = message.data*100
if __name__ == '__main__':
rospy.init_node('100times')
sub = rospy.Subscriber('count_up', Int32, cb)
pub = rospy.Publisher('100times', Int32, queue_size=1)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
pub.publish(f)
rate.sleep()
``` |
{
"source": "2daysweb/mitpython",
"score": 3
} |
#### File: 2daysweb/mitpython/autopy.py
```python
import time
def morningMotive():
print('Welcome To Morning Motive')
print('Do not go where the path may lead, go instead where there is no path and leave a trail.')
time.sleep(5)
print('"To be yourself in a world that is constantly trying to make you something else is the greatest accomplishment"')
time.sleep(3)
print('"Write it on your heart that every day is the best day in the year"')
print('"For every minute you remain angry, you give up sixty seconds of peace of mind"')
print('"Our greatest glory is not in never failing, but in rising up every time we fail"')
```
#### File: 2daysweb/mitpython/cubedmit.py
```python
def cubed(x):
return x*x*x
#Clever use of while loop imo.
def triplePower(x, n):
while n>1:
x = cubed(x)
n = n/3
return x
#Note that the values of the global variables (x=5, n=1) don't influence the local procedure carried out in the triplePower function
x = 5
n = 1
print(triplePower(2,3))
```
#### File: 2daysweb/mitpython/gsnchkgh.py
```python
x = int(input("Enter a number: "))
ans = 0
while ans**2 < x:
ans = ans + 1
if ans**2 == x:
print(str(ans) + " " + "is most def the square root of" + " " + str(x))
else:
print(str(x)+ " " + "is not a perfect square dewd")
``` |
{
"source": "2degrees/djeneralize",
"score": 2
} |
#### File: djeneralize/djeneralize/utils.py
```python
from django.http import Http404
__all__ = ['find_next_path_down', 'get_specialization_or_404']
def find_next_path_down(current_path, path_to_reduce, separator):
"""
Manipulate ``path_to_reduce`` so that it only contains one more level of
detail than ``current_path``.
:param current_path: The path used to determine the current level
:type current_path: :class:`basestring`
:param path_to_reduce: The path to find the next level down
:type path_to_reduce: :class:`basestring`
:param separator: The string used to separate the parts of path
:type separator: :class:`basestring`
:return: The path one level deeper than that of ``current_path``
:rtype: :class:`unicode`
"""
# Determine the current and next levels:
current_level = current_path.count(separator)
next_level = current_level + 1
# Reduce the path to reduce down to just one more level deep than the
# current path depth:
return u'%s%s' % (
separator.join(
path_to_reduce.split(separator, next_level)[:next_level]
), separator
)
def _get_queryset(klass):
"""
Returns a SpecializedQuerySet from a BaseGeneralizedModel sub-class,
SpecializationManager, or SpecializedQuerySet.
"""
# Need to import here to stop circular import problems
# TODO: move this functionality to a separate module
from djeneralize.manager import SpecializationManager
from djeneralize.query import SpecializedQuerySet
if isinstance(klass, SpecializedQuerySet):
return klass
elif isinstance(klass, SpecializationManager):
manager = klass
else:
manager = klass._default_specialization_manager
return manager.all()
def get_specialization_or_404(klass, *args, **kwargs):
"""
    Uses get() to return a specialized object, or raises an Http404 exception
if the object does not exist.
klass may be a BaseGeneralizedModel, SpecializationManager, or
SpecializedQuerySet object. All other passed arguments and keyword arguments
are used in the get() query.
    .. note:: Like with get(), a MultipleObjectsReturned exception will be raised if more
than one object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404(
'No %s matches the given query.' % queryset.model._meta.object_name
)
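
# Illustrative usage sketch (added for clarity; not part of the original module):
if __name__ == '__main__':  # pragma: no cover
    # One level below '/home/' along '/home/docs/work/abc.txt' is '/home/docs/'.
    print(find_next_path_down(u'/home/', u'/home/docs/work/abc.txt', u'/'))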
```
#### File: djeneralize/tests/test_integration.py
```python
from django.db.models.aggregates import Avg
from django.db.models.expressions import F
from django.db.models.query import ValuesListQuerySet
from django.db.models.query import ValuesQuerySet
from django.db.models.query_utils import Q
from fixture.django_testcase import FixtureTestCase
from nose.tools import assert_not_equal
from nose.tools import eq_
from nose.tools import ok_
from tests.fixtures import BallPointPenData
from tests.fixtures import FountainPenData
from tests.fixtures import PenData
from tests.fixtures import PencilData
from tests.test_djeneralize.writing.models import WritingImplement
def compare_generalization_to_specialization(generalization, specialization):
eq_(generalization.pk, specialization.pk)
eq_(generalization.name, specialization.name)
eq_(generalization.length, specialization.length)
assert_not_equal(generalization, specialization)
class TestManager(FixtureTestCase):
datasets = [PenData, PencilData, FountainPenData, BallPointPenData]
class TestSpecializedQuerySet(FixtureTestCase):
datasets = [PenData, PencilData, FountainPenData, BallPointPenData]
def _check_attributes(self, normal_objects, specialized_objects):
"""
Helper test to run through the two querysets and
test various attributes
"""
for normal_object, specialized_object in zip(
normal_objects, specialized_objects
):
eq_(normal_object.__class__, WritingImplement)
assert_not_equal(specialized_object.__class__, WritingImplement)
compare_generalization_to_specialization(
normal_object,
specialized_object
)
ok_(isinstance(specialized_object, WritingImplement))
def test_all(self):
"""Check the all() method works correctly"""
all_objects = WritingImplement.objects.order_by('name')
all_specializations = WritingImplement.specializations.order_by('name')
eq_(len(all_objects), len(all_specializations))
self._check_attributes(all_objects, all_specializations)
def test_filter(self):
"""Check the filter() method works correctly"""
filtered_objects = WritingImplement.objects \
.filter(length__gte=10) \
.filter(name__endswith='pen')
filtered_specializations = WritingImplement.specializations \
.filter(name__endswith='pen') \
.filter(length__gte=10)
self._check_attributes(filtered_objects, filtered_specializations)
single_filter = WritingImplement.specializations.filter(
name__endswith='pen', length__gte=10
)
eq_(single_filter[0], filtered_specializations[0])
def test_exclude(self):
"""Check the exclude() method works correctly"""
excluded_objects = WritingImplement.objects.exclude(length__lt=9)
excluded_specializations = \
WritingImplement.specializations.exclude(length__lt=9)
self._check_attributes(excluded_objects, excluded_specializations)
def test_slice_index(self):
"""
Check that querysets can be sliced by a single index value correctly
"""
all_objects = WritingImplement.objects.order_by('name')
all_specializations = WritingImplement.specializations.order_by('name')
eq_(len(all_objects), len(all_specializations))
for i in range(len(all_objects)):
o = all_objects[i]
s = all_specializations[i]
compare_generalization_to_specialization(o, s)
def test_slice_range(self):
"""Test various range slices for compatibility"""
# Two numbers:
sliced_objects = WritingImplement.objects.order_by('name')[1:4]
sliced_specializations = \
WritingImplement.specializations.order_by('name')[1:4]
self._check_attributes(sliced_objects, sliced_specializations)
# Just end point:
sliced_objects = WritingImplement.objects.order_by('length')[:3]
sliced_specializations = \
WritingImplement.specializations.order_by('length')[:3]
self._check_attributes(sliced_objects, sliced_specializations)
# Just start point:
sliced_objects = WritingImplement.objects.order_by('-length')[1:]
sliced_specializations = \
WritingImplement.specializations.order_by('-length')[1:]
self._check_attributes(sliced_objects, sliced_specializations)
def test_order(self):
"""Test various orderings for compatibility"""
# By name:
ordered_objects = WritingImplement.objects.order_by('name')
ordered_specializations = \
WritingImplement.specializations.order_by('name')
self._check_attributes(ordered_objects, ordered_specializations)
# By inverse length and then name:
ordered_objects = WritingImplement.objects.order_by('-length', 'name')
ordered_specializations = WritingImplement.specializations.order_by(
'-length', 'name'
)
self._check_attributes(ordered_objects, ordered_specializations)
def test_get(self):
"""Check that the get() method behaves correctly"""
general = WritingImplement.objects.get(name=PenData.GeneralPen.name)
specialized = WritingImplement.specializations.get(
name=PenData.GeneralPen.name
)
self._check_attributes([general], [specialized])
def test_values(self):
"""Check values returns a ValuesQuerySet in both cases"""
normal_values = WritingImplement.objects.values('pk', 'name')
specialized_values = \
WritingImplement.specializations.values('pk', 'name')
ok_(isinstance(normal_values, ValuesQuerySet))
ok_(isinstance(specialized_values, ValuesQuerySet))
for normal_item, specialized_item in zip(
normal_values, specialized_values
):
eq_(normal_item['name'], specialized_item['name'])
eq_(normal_item['pk'], specialized_item['pk'])
def test_values_list(self):
"""Check values_list returns a ValuesListQuerySet in both cases"""
normal_values = WritingImplement.objects.values_list('pk', 'length')
specialized_values = WritingImplement.specializations.values_list(
'pk', 'length'
)
ok_(isinstance(normal_values, ValuesListQuerySet))
ok_(isinstance(specialized_values, ValuesListQuerySet))
for (n_pk, n_length), (s_pk, s_length) in zip(
normal_values, specialized_values
):
eq_(n_pk, s_pk)
eq_(n_length, s_length)
def test_flat_values_list(self):
"""
Check value_list with flat=True returns a ValuesListQuerySet in both
cases
"""
normal_values = WritingImplement.objects.values_list('pk', flat=True)
specialized_values = WritingImplement.specializations.values_list(
'pk', flat=True
)
ok_(isinstance(normal_values, ValuesListQuerySet))
ok_(isinstance(specialized_values, ValuesListQuerySet))
eq_(list(normal_values), list(specialized_values))
def test_aggregate(self):
"""Aggregations work on both types of querysets in the same manner"""
normal_aggregate = WritingImplement.objects.aggregate(Avg('length'))
specialized_aggregate = \
WritingImplement.specializations.aggregate(Avg('length'))
eq_(normal_aggregate, specialized_aggregate)
def test_count(self):
"""Counts work over both types of querysets"""
normal_count = WritingImplement.objects.filter(length__lt=13).count()
        specialized_count = \
            WritingImplement.specializations.filter(length__lt=13).count()
eq_(normal_count, specialized_count)
def test_in_bulk(self):
"""In bulk works across both types of queryset"""
ids = list(WritingImplement.objects.values_list('pk', flat=True))[2:]
normal_bulk = WritingImplement.objects.in_bulk(ids)
specialized_bulk = WritingImplement.specializations.in_bulk(ids)
eq_(normal_bulk.keys(), specialized_bulk.keys())
self._check_attributes(normal_bulk.values(), specialized_bulk.values())
def test_update(self):
"""update() works the same across querysets"""
original_lengths = list(
WritingImplement.objects.order_by('length').values_list(
'length', flat=True
)
)
WritingImplement.specializations.all().update(length=1+F('length'))
new_lengths = list(
WritingImplement.objects.order_by('length').values_list(
'length', flat=True
)
)
for original_length, new_length in zip(original_lengths, new_lengths):
eq_(original_length+1, new_length)
def test_complex_query(self):
"""SpecializedQuerysets can be constructed from Q objects"""
q_small = Q(length__lt=10)
q_large = Q(length__gt=13)
normal_objects = WritingImplement.objects.filter(q_small | q_large)
specialized_objects = WritingImplement.specializations.filter(
q_small | q_large
)
self._check_attributes(normal_objects, specialized_objects)
``` |
{
"source": "2degrees/drf-nested-resources",
"score": 2
} |
#### File: drf-nested-resources/tests/__init__.py
```python
import os
import sys
from django import setup as dj_setup
from django.test.utils import setup_databases
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.extend([
os.path.join(BASE_DIR, 'tests'),
])
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_project.project.settings'
def setup():
setup_databases(0, False)
dj_setup()
```
#### File: drf-nested-resources/tests/test_routers.py
```python
from django.conf.urls import include
from django.conf.urls import url
from django.urls import resolve
from nose.tools import assert_raises
from nose.tools import eq_
from nose.tools import ok_
from rest_framework.reverse import reverse
from rest_framework.routers import SimpleRouter
from rest_framework.test import APIRequestFactory
from rest_framework.versioning import NamespaceVersioning
from django_project.languages.models import Website
from django_project.languages.models import WebsiteVisit
from django_project.languages.views import DeveloperViewSet
from django_project.languages.views import DeveloperViewSet2
from django_project.languages.views import ProgrammingLanguageVersionViewSet
from django_project.languages.views import ProgrammingLanguageViewSet
from django_project.languages.views import WebsiteHostViewSet
from django_project.languages.views import WebsiteViewSet
from django_project.languages.views import WebsiteVisitViewSet
from drf_nested_resources.lookup_helpers import RequestParentLookupHelper
from drf_nested_resources.routers import NestedResource
from drf_nested_resources.routers import Resource
from drf_nested_resources.routers import make_urlpatterns_from_resources
from tests._testcases import FixtureTestCase
from tests._testcases import TestCase
from tests._utils import TestClient
from tests._utils import make_response_for_request
class TestURLPatternGeneration(TestCase):
@staticmethod
def test_default_router():
resources = []
urlpatterns = make_urlpatterns_from_resources(resources)
eq_(2, len(urlpatterns))
url_path1 = reverse('api-root', urlconf=urlpatterns)
eq_('/', url_path1)
url_path2 = \
reverse('api-root', kwargs={'format': 'json'}, urlconf=urlpatterns)
eq_('/.json', url_path2)
@staticmethod
def test_resources_resolution_with_default_router():
resources = [Resource('developer', 'developers', DeveloperViewSet)]
urlpatterns = make_urlpatterns_from_resources(resources)
url_path = reverse('developer-list', urlconf=urlpatterns)
eq_('/developers/', url_path)
view_callable, view_args, view_kwargs = resolve(url_path, urlpatterns)
ok_(getattr(view_callable, 'is_fixture', False))
eq_((), view_args)
eq_({}, view_kwargs)
@staticmethod
def test_resources_resolution_with_custom_router():
resources = [Resource('developer', 'developers', DeveloperViewSet)]
urlpatterns = make_urlpatterns_from_resources(resources, SimpleRouter)
eq_(2, len(urlpatterns))
url_path1 = reverse('developer-list', urlconf=urlpatterns)
eq_('/developers/', url_path1)
url_path2 = reverse(
'developer-detail',
kwargs={'developer': 1},
urlconf=urlpatterns,
)
eq_('/developers/1/', url_path2)
@staticmethod
def test_resources_resolution_with_hyphenated_resource_name():
resources = \
[Resource('software-developer', 'developers', DeveloperViewSet)]
urlpatterns = make_urlpatterns_from_resources(resources)
url_path1 = reverse('software_developer-list', urlconf=urlpatterns)
eq_('/developers/', url_path1)
url_path2 = reverse(
'software_developer-detail',
kwargs={'software_developer': 1},
urlconf=urlpatterns,
)
eq_('/developers/1/', url_path2)
@staticmethod
def test_resources_resolution_with_invalid_resource_name():
resources = [Resource('2015developer', 'developers', DeveloperViewSet)]
with assert_raises(AssertionError):
make_urlpatterns_from_resources(resources)
@staticmethod
def test_nested_resources_resolution():
resources = [
Resource(
'developer',
'developers',
DeveloperViewSet,
[
NestedResource(
'language',
'languages',
ProgrammingLanguageViewSet,
parent_field_lookup='author',
),
],
),
]
urlpatterns = make_urlpatterns_from_resources(resources)
url_path = reverse(
'language-list',
kwargs={'developer': 1},
urlconf=urlpatterns,
)
eq_('/developers/1/languages/', url_path)
class TestDispatch(FixtureTestCase):
_RESOURCES = [
Resource(
'developer',
'developers',
DeveloperViewSet,
[
NestedResource(
'language',
'languages',
ProgrammingLanguageViewSet,
[
NestedResource(
'visit',
'visits',
WebsiteVisitViewSet,
parent_field_lookup='website__language',
),
NestedResource(
'version',
'versions',
ProgrammingLanguageVersionViewSet,
parent_field_lookup='language',
),
],
parent_field_lookup='author',
),
],
),
]
def test_parent_detail(self):
response = self._make_response_for_request(
'developer-detail',
{'developer': self.developer1.pk},
)
response_data = response.data
urlpatterns = make_urlpatterns_from_resources(self._RESOURCES)
expected_languages_url = reverse(
'language-list',
kwargs={'developer': self.developer1.pk},
urlconf=urlpatterns,
)
languages_url = response_data['programming_languages']
ok_(languages_url.endswith(expected_languages_url))
eq_(200, response.status_code)
def test_parent_list(self):
response = self._make_response_for_request('developer-list')
eq_(200, response.status_code)
def test_parent_list_mounted_on_different_url_path(self):
api_urls = list(make_urlpatterns_from_resources(self._RESOURCES))
urlpatterns = (url(r'^api/', include(api_urls)),)
client = TestClient(urlpatterns)
url_path = reverse('developer-list', urlconf=urlpatterns)
response = client.get(url_path)
eq_(200, response.status_code)
def test_non_existing_parent_detail(self):
response = self._make_response_for_request(
'developer-detail',
{'developer': self.non_existing_developer_pk},
)
eq_(404, response.status_code)
def test_child_detail(self):
view_kwargs = {
'developer': self.developer1.pk,
'language': self.programming_language1.pk,
}
response = \
self._make_response_for_request('language-detail', view_kwargs)
eq_(200, response.status_code)
def test_child_detail_inside_namespace(self):
namespace = 'v1'
api_urls = make_urlpatterns_from_resources(self._RESOURCES)
urlpatterns = _mount_urls_on_namespace(api_urls, namespace)
response = _make_request_to_namespaced_url(
namespace,
'language-detail',
{
'developer': self.developer1.pk,
'language': self.programming_language1.pk,
},
urlpatterns,
)
eq_(200, response.status_code)
def test_child_list(self):
response = self._make_response_for_request(
'language-list',
{'developer': self.developer1.pk},
)
eq_(200, response.status_code)
def test_child_detail_with_wrong_parent(self):
view_kwargs = {
'developer': self.developer1.pk,
'language': self.programming_language2.pk,
}
response = \
self._make_response_for_request('language-detail', view_kwargs)
eq_(404, response.status_code)
def test_child_detail_with_non_existing_parent(self):
view_kwargs = {
'developer': self.non_existing_developer_pk,
'language': self.programming_language1.pk,
}
response = \
self._make_response_for_request('language-detail', view_kwargs)
eq_(404, response.status_code)
def test_child_list_with_non_existing_parent(self):
response = self._make_response_for_request(
'language-list',
{'developer': self.non_existing_developer_pk},
)
eq_(404, response.status_code)
def test_child_detail_with_non_viewable_parent(self):
resources = [
Resource(
'website',
'websites',
_WebsiteViewSetWithCustomGetQueryset,
[
NestedResource(
'host',
'hosts',
WebsiteHostViewSet,
parent_field_lookup='websites',
),
],
),
]
view_kwargs = {
'website': self.website.pk,
'host': self.website_host.pk,
}
response = \
make_response_for_request('host-detail', view_kwargs, resources)
eq_(404, response.status_code)
def test_child_list_with_non_viewable_parent(self):
resources = [
Resource(
'website',
'websites',
_WebsiteViewSetWithCustomGetQueryset,
[
NestedResource(
'host',
'hosts',
WebsiteHostViewSet,
parent_field_lookup='websites',
),
],
),
]
response = make_response_for_request(
'host-list',
{'website': self.website.pk},
resources,
)
eq_(404, response.status_code)
def test_non_existing_child_detail(self):
view_kwargs = {
'developer': self.developer1.pk,
'language': self.non_existing_developer_pk,
}
response = \
self._make_response_for_request('language-detail', view_kwargs)
eq_(404, response.status_code)
def test_grand_child_detail(self):
view_kwargs = {
'developer': self.developer1.pk,
'language': self.programming_language1.pk,
'version': self.programming_language_version.pk,
}
response = \
self._make_response_for_request('version-detail', view_kwargs)
eq_(200, response.status_code)
def test_detail_with_non_existing_grandparent(self):
view_kwargs = {
'developer': self.non_existing_developer_pk,
'language': self.programming_language1.pk,
'version': self.programming_language_version.pk,
}
response = \
self._make_response_for_request('version-detail', view_kwargs)
eq_(404, response.status_code)
def test_indirect_relation_detail(self):
resources = [
Resource(
'developer',
'developers',
DeveloperViewSet2,
[
NestedResource(
'version',
'versions',
ProgrammingLanguageVersionViewSet,
parent_field_lookup='language__author',
),
],
),
]
view_kwargs = {
'developer': self.developer1.pk,
'version': self.programming_language_version.pk,
}
response = \
make_response_for_request('version-detail', view_kwargs, resources)
eq_(200, response.status_code)
def test_indirect_child_detail_via_one_to_one(self):
visit = WebsiteVisit.objects.create(website=self.website)
resources = [
Resource(
'developer',
'developers',
DeveloperViewSet,
[
NestedResource(
'language',
'languages',
ProgrammingLanguageViewSet,
[
NestedResource(
'visit',
'visits',
WebsiteVisitViewSet,
parent_field_lookup='website__language',
),
],
parent_field_lookup='author',
),
],
),
]
view_kwargs = {
'developer': self.developer1.pk,
'language': self.programming_language1.pk,
'visit': visit.pk,
}
response = \
make_response_for_request('visit-detail', view_kwargs, resources)
eq_(200, response.status_code)
def test_many_to_many_relationships(self):
resources = [
Resource(
'website',
'websites',
WebsiteViewSet,
[
NestedResource(
'host',
'hosts',
WebsiteHostViewSet,
parent_field_lookup=RequestParentLookupHelper(
'websites',
'website',
),
),
],
),
]
view_kwargs = {
'website': self.website.pk,
'host': self.website_host.pk,
}
response = \
make_response_for_request('host-detail', view_kwargs, resources)
eq_(200, response.status_code)
def test_reverse_many_to_many_relationships(self):
resources = [
Resource(
'host',
'hosts',
WebsiteHostViewSet,
[
NestedResource(
'website',
'websites',
WebsiteViewSet,
parent_field_lookup=RequestParentLookupHelper(
'hosts',
'host',
),
),
],
),
]
view_kwargs = {
'website': self.website.pk,
'host': self.website_host.pk,
}
response = \
make_response_for_request('website-detail', view_kwargs, resources)
eq_(200, response.status_code)
def _make_response_for_request(self, view_name, view_kwargs=None):
response = \
make_response_for_request(view_name, view_kwargs, self._RESOURCES)
return response
class _WebsiteViewSetWithCustomGetQueryset(WebsiteViewSet):
def get_queryset(self):
return Website.objects.none()
def _mount_urls_on_namespace(urls, namespace):
urls = list(urls)
urlpatterns = (
url(r'^{}/'.format(namespace), include((urls, 'app'), namespace)),
)
return urlpatterns
def _make_request_to_namespaced_url(namespace, url_name, url_kwargs, urlconf):
request_factory = APIRequestFactory(SERVER_NAME='example.org')
request = request_factory.get('/')
request.versioning_scheme = NamespaceVersioning()
request.version = namespace
url_path = reverse(
url_name,
kwargs=url_kwargs,
urlconf=urlconf,
request=request,
)
client = TestClient(urlconf)
response = client.get(url_path)
return response
``` |
{
"source": "2degrees/hubspot-contacts",
"score": 2
} |
#### File: hubspot/contacts/__init__.py
```python
from itertools import chain
from pyrecord import Record
from hubspot.contacts._constants import BATCH_SAVING_SIZE_LIMIT
from hubspot.contacts._constants import CONTACTS_API_SCRIPT_NAME
from hubspot.contacts._property_utils import get_property_type_by_property_name
from hubspot.contacts.generic_utils import ipaginate
from hubspot.contacts.request_data_formatters.contacts import \
format_contacts_data_for_saving
Contact = Record.create_type(
'Contact',
'vid',
'email_address',
'properties',
'related_contact_vids',
related_contact_vids=(),
)
_CONTACTS_SAVING_URL_PATH = CONTACTS_API_SCRIPT_NAME + '/contact/batch/'
def save_contacts(contacts, connection):
"""
Request the creation and/or update of the ``contacts``.
:param iterable contacts: The contacts to be created/updated
:return: ``None``
:raises hubspot.connection.exc.HubspotException:
:raises hubspot.contacts.exc.HubspotPropertyValueError: If one of the
property values on a contact is invalid.
For each contact, only its email address and properties are passed to
HubSpot. Any other datum (e.g., the VID) is ignored.
As at this writing, this end-point does not process the requested changes
immediately. Instead, it **partially** validates the input and, if it's all
correct, the requested changes are queued.
End-point documentation:
http://developers.hubspot.com/docs/methods/contacts/batch_create_or_update
"""
contacts_batches = ipaginate(contacts, BATCH_SAVING_SIZE_LIMIT)
contacts_first_batch = next(contacts_batches, None)
if not contacts_first_batch:
return
property_type_by_property_name = \
get_property_type_by_property_name(connection)
for contacts_batch in chain([contacts_first_batch], contacts_batches):
contacts_batch_data = format_contacts_data_for_saving(
contacts_batch,
property_type_by_property_name,
)
connection.send_post_request(
_CONTACTS_SAVING_URL_PATH,
contacts_batch_data,
)
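
# Illustrative usage sketch (added; hypothetical values -- `connection` stands for
# an already-authenticated hubspot.connection connection object):
#
#     contacts = [
#         Contact(
#             vid=None,
#             email_address='jane.doe@example.com',
#             properties={'firstname': 'Jane', 'lastname': 'Doe'},
#         ),
#     ]
#     save_contacts(contacts, connection)
#
# Only the email address and properties are sent; the VID is ignored (see the
# docstring above).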
``` |
{
"source": "2degrees/twapi-authn",
"score": 2
} |
#### File: twapi-authn/tests/test_authn.py
```python
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import eq_
from nose.tools import ok_
from tests.utils import get_uuid4_str
from twapi_authn import AccessTokenError
from twapi_authn import claim_access_token
from twapi_authn import is_session_active
from twapi_connection.exc import NotFoundError
from twapi_connection.testing import MockConnection, MockResponse
from twapi_connection.testing import SuccessfulAPICall
from twapi_connection.testing import UnsuccessfulAPICall
class TestAuthnTokenClaiming(object):
def test_valid_token(self):
expected_user_id = 1
access_token = get_uuid4_str()
path_info = '/sessions/{}/'.format(access_token)
api_call = SuccessfulAPICall(
path_info,
'POST',
response=MockResponse(expected_user_id),
)
with _make_connection(api_call) as connection:
user_id = claim_access_token(connection, access_token)
eq_(expected_user_id, user_id)
def test_invalid_token(self):
access_token = get_uuid4_str()
path_info = '/sessions/{}/'.format(access_token)
api_call = UnsuccessfulAPICall(
path_info,
'POST',
exception=NotFoundError(),
)
with assert_raises(AccessTokenError):
with _make_connection(api_call) as connection:
claim_access_token(connection, access_token)
class TestSessionIsActive(object):
def test_active_session(self):
access_token = get_uuid4_str()
path_info = '/sessions/{}/'.format(access_token)
api_call = SuccessfulAPICall(
path_info,
'HEAD',
response=MockResponse(None),
)
with _make_connection(api_call) as connection:
is_active = is_session_active(connection, access_token)
ok_(is_active)
def test_inactive_session(self):
access_token = get_uuid4_str()
path_info = '/sessions/{}/'.format(access_token)
api_call = UnsuccessfulAPICall(
path_info,
'HEAD',
exception=NotFoundError(),
)
with _make_connection(api_call) as connection:
is_active = is_session_active(connection, access_token)
assert_false(is_active)
def _make_connection(api_call):
connection = MockConnection(lambda: [api_call])
return connection
``` |
{
"source": "2degrees/twapi-connection",
"score": 2
} |
#### File: twapi-connection/twapi_connection/testing.py
```python
from pyrecord import Record
APICall = Record.create_type(
'APICall',
'url',
'http_method',
'query_string_args',
'request_body_deserialization',
query_string_args=None,
request_body_deserialization=None,
)
SuccessfulAPICall = APICall.extend_type('SuccessfulAPICall', 'response')
UnsuccessfulAPICall = APICall.extend_type('UnsuccessfulAPICall', 'exception')
class MockConnection(object):
"""Mock representation of a :class:`~twapi.Connection`"""
def __init__(self, *api_calls_simulators):
super(MockConnection, self).__init__()
self._expected_api_calls = []
for api_calls_simulator in api_calls_simulators:
for api_call in api_calls_simulator():
self._expected_api_calls.append(api_call)
self._request_count = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
return
expected_api_call_count = len(self._expected_api_calls)
pending_api_call_count = expected_api_call_count - self._request_count
error_message = \
'{} more requests were expected'.format(pending_api_call_count)
assert expected_api_call_count == self._request_count, error_message
def send_get_request(self, url, query_string_args=None):
return self._call_remote_method(url, 'GET', query_string_args)
def send_head_request(self, url, query_string_args=None):
return self._call_remote_method(url, 'HEAD', query_string_args)
def send_post_request(self, url, body_deserialization=None):
return self._call_remote_method(
url,
'POST',
request_body_deserialization=body_deserialization,
)
def send_put_request(self, url, body_deserialization):
return self._call_remote_method(
url,
'PUT',
request_body_deserialization=body_deserialization,
)
def send_delete_request(self, url):
return self._call_remote_method(url, 'DELETE')
def _call_remote_method(
self,
url,
http_method,
query_string_args=None,
request_body_deserialization=None,
):
self._require_enough_api_calls(url)
expected_api_call = self._expected_api_calls[self._request_count]
_assert_request_matches_api_call(
expected_api_call,
url,
http_method,
query_string_args,
request_body_deserialization,
)
self._request_count += 1
if isinstance(expected_api_call, UnsuccessfulAPICall):
raise expected_api_call.exception
return expected_api_call.response
@property
def api_calls(self):
api_calls = self._expected_api_calls[:self._request_count]
return api_calls
def _require_enough_api_calls(self, url):
are_enough_api_calls = \
self._request_count < len(self._expected_api_calls)
error_message = 'Not enough API calls for new requests ' \
'(requested {!r})'.format(url)
assert are_enough_api_calls, error_message
class MockResponse:
def __init__(self, body_deserialization, headers=None):
self._body_deserialization = body_deserialization
self.headers = headers or {}
def json(self):
return self._body_deserialization
def _assert_request_matches_api_call(
api_call,
url,
http_method,
query_string_args,
request_body_deserialization,
):
urls_match = api_call.url == url
assert urls_match, 'Expected URL {!r}, got {!r}'.format(api_call.url, url)
query_string_args_match = api_call.query_string_args == query_string_args
assert query_string_args_match, \
'Expected query string arguments {!r}, got {!r}'.format(
api_call.query_string_args,
query_string_args,
)
http_methods_match = api_call.http_method == http_method
assert http_methods_match, \
'Expected HTTP method {!r}, got {!r}'.format(
api_call.http_method,
http_method,
)
request_body_deserializations_match = \
api_call.request_body_deserialization == request_body_deserialization
assert request_body_deserializations_match, \
'Expected request body deserialization {!r}, got {!r}'.format(
api_call.request_body_deserialization,
request_body_deserialization,
)
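
# Illustrative usage sketch (added for clarity; not part of the original module):
if __name__ == '__main__':  # pragma: no cover
    # Simulate a single GET request and check the canned response.
    expected_call = SuccessfulAPICall(
        '/users/1/',
        'GET',
        response=MockResponse({'id': 1}),
    )
    with MockConnection(lambda: [expected_call]) as connection:
        response = connection.send_get_request('/users/1/')
        assert response.json() == {'id': 1}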
``` |
{
"source": "2deviant/Mathematica-Trees",
"score": 3
} |
#### File: 2deviant/Mathematica-Trees/converters.py
```python
def _mathematica_line_segments(tree):
"""
Produce Mathematica graphics object elements.
"""
for branch in tree:
[depth, [[x0, y0], [x1, y1]]] = branch
yield '{{Thickness[{}/300.], Line[{{{{{},{}}},{{{},{}}}}}]}}'.format(
depth, x0, y0, x1, y1
)
def to_mathematica(tree):
"""
Produce Mathematica code to draw the tree.
"""
segments = [segment for segment in _mathematica_line_segments(tree)]
code = 'tree = {{\n{}\n}};\n\nShow[Graphics[tree], AspectRatio -> 1, PlotRange -> All]\n'.format(
',\n'.join(segments)
)
return code
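
# Illustrative usage sketch (added for clarity; not part of the original file):
if __name__ == '__main__':  # pragma: no cover
    # A one-branch "tree": depth 1, line segment from (0, 0) to (0, 1).
    demo_tree = [[1, [[0, 0], [0, 1]]]]
    print(to_mathematica(demo_tree))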
``` |
{
"source": "2Dooh/TF-MOENAS",
"score": 3
} |
#### File: model/bench101/model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
from .base_ops import *
import torch
import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self,
spec,
num_labels,
in_channels=3,
stem_out_channels=128,
num_stack=3,
num_modules_per_stack=3,
use_stem=True,
**kwargs):
super(Network, self).__init__()
self.layers = nn.ModuleList([])
in_channels = in_channels
out_channels = stem_out_channels
# initial stem convolution
stem_conv = ConvBnRelu(in_channels, out_channels, 3, 1, 1) if use_stem else nn.Identity()
self.layers.append(stem_conv)
in_channels = out_channels if use_stem else in_channels
for stack_num in range(num_stack):
if stack_num > 0:
downsample = nn.MaxPool2d(kernel_size=2, stride=2)
self.layers.append(downsample)
out_channels *= 2
for module_num in range(num_modules_per_stack):
cell = Cell(spec, in_channels, out_channels)
self.layers.append(cell)
in_channels = out_channels
self.classifier = nn.Linear(out_channels, num_labels)
self._initialize_weights()
def forward(self, x):
for _, layer in enumerate(self.layers):
x = layer(x)
out = torch.mean(x, (2, 3))
out = self.classifier(out)
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class Cell(nn.Module):
"""
Builds the model using the adjacency matrix and op labels specified. Channels
controls the module output channel count but the interior channels are
determined via equally splitting the channel count whenever there is a
concatenation of Tensors.
"""
def __init__(self, spec, in_channels, out_channels):
super(Cell, self).__init__()
self.spec = spec
self.num_vertices = np.shape(self.spec.matrix)[0]
# vertex_channels[i] = number of output channels of vertex i
self.vertex_channels = ComputeVertexChannels(in_channels, out_channels, self.spec.matrix)
#self.vertex_channels = [in_channels] + [out_channels] * (self.num_vertices - 1)
# operation for each node
self.vertex_op = nn.ModuleList([None])
for t in range(1, self.num_vertices-1):
op = OP_MAP[spec.ops[t]](self.vertex_channels[t], self.vertex_channels[t])
self.vertex_op.append(op)
# operation for input on each vertex
self.input_op = nn.ModuleList([None])
for t in range(1, self.num_vertices):
if self.spec.matrix[0, t]:
self.input_op.append(Projection(in_channels, self.vertex_channels[t]))
else:
self.input_op.append(None)
def forward(self, x):
tensors = [x]
out_concat = []
for t in range(1, self.num_vertices-1):
fan_in = [Truncate(tensors[src], self.vertex_channels[t]) for src in range(1, t) if self.spec.matrix[src, t]]
if self.spec.matrix[0, t]:
fan_in.append(self.input_op[t](x))
# perform operation on node
#vertex_input = torch.stack(fan_in, dim=0).sum(dim=0)
vertex_input = sum(fan_in)
#vertex_input = sum(fan_in) / len(fan_in)
vertex_output = self.vertex_op[t](vertex_input)
tensors.append(vertex_output)
if self.spec.matrix[t, self.num_vertices-1]:
out_concat.append(tensors[t])
if not out_concat:
assert self.spec.matrix[0, self.num_vertices-1]
outputs = self.input_op[self.num_vertices-1](tensors[0])
else:
if len(out_concat) == 1:
outputs = out_concat[0]
else:
outputs = torch.cat(out_concat, 1)
if self.spec.matrix[0, self.num_vertices-1]:
outputs += self.input_op[self.num_vertices-1](tensors[0])
#if self.spec.matrix[0, self.num_vertices-1]:
# out_concat.append(self.input_op[self.num_vertices-1](tensors[0]))
#outputs = sum(out_concat) / len(out_concat)
return outputs
def Projection(in_channels, out_channels):
"""1x1 projection (as in ResNet) followed by batch normalization and ReLU."""
return ConvBnRelu(in_channels, out_channels, 1)
def Truncate(inputs, channels):
"""Slice the inputs to channels if necessary."""
input_channels = inputs.size()[1]
if input_channels < channels:
raise ValueError('input channel < output channels for truncate')
elif input_channels == channels:
return inputs # No truncation necessary
else:
# Truncation should only be necessary when channel division leads to
# vertices with +1 channels. The input vertex should always be projected to
# the minimum channel count.
assert input_channels - channels == 1
return inputs[:, :channels, :, :]
def ComputeVertexChannels(in_channels, out_channels, matrix):
"""Computes the number of channels at every vertex.
Given the input channels and output channels, this calculates the number of
channels at each interior vertex. Interior vertices have the same number of
channels as the max of the channels of the vertices it feeds into. The output
channels are divided amongst the vertices that are directly connected to it.
When the division is not even, some vertices may receive an extra channel to
compensate.
Returns:
list of channel counts, in order of the vertices.
"""
num_vertices = np.shape(matrix)[0]
vertex_channels = [0] * num_vertices
vertex_channels[0] = in_channels
vertex_channels[num_vertices - 1] = out_channels
if num_vertices == 2:
# Edge case where module only has input and output vertices
return vertex_channels
# Compute the in-degree ignoring input, axis 0 is the src vertex and axis 1 is
# the dst vertex. Summing over 0 gives the in-degree count of each vertex.
in_degree = np.sum(matrix[1:], axis=0)
# print(in_channels)
# print(out_channels)
# print(in_degree[num_vertices - 1])
interior_channels = out_channels // in_degree[num_vertices - 1]
# interior_channels = 1 if interior_channels == 0 else interior_channels
correction = out_channels % in_degree[num_vertices - 1] # Remainder to add
# Set channels of vertices that flow directly to output
for v in range(1, num_vertices - 1):
if matrix[v, num_vertices - 1]:
vertex_channels[v] = interior_channels
if correction:
vertex_channels[v] += 1
correction -= 1
# Set channels for all other vertices to the max of the out edges, going
# backwards. (num_vertices - 2) index skipped because it only connects to
# output.
for v in range(num_vertices - 3, 0, -1):
if not matrix[v, num_vertices - 1]:
for dst in range(v + 1, num_vertices - 1):
if matrix[v, dst]:
vertex_channels[v] = max(vertex_channels[v], vertex_channels[dst])
assert vertex_channels[v] > 0
# Sanity check, verify that channels never increase and final channels add up.
final_fan_in = 0
for v in range(1, num_vertices - 1):
if matrix[v, num_vertices - 1]:
final_fan_in += vertex_channels[v]
for dst in range(v + 1, num_vertices - 1):
if matrix[v, dst]:
assert vertex_channels[v] >= vertex_channels[dst]
assert final_fan_in == out_channels or num_vertices == 2
# num_vertices == 2 means only input/output nodes, so 0 fan-in
return vertex_channels
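
# Illustrative sketch of the channel split (added for clarity; not part of the
# original file): the input feeds two parallel vertices that both feed the
# output, so the 16 output channels are divided evenly between them.
if __name__ == '__main__':  # pragma: no cover
    demo_matrix = np.array([[0, 1, 1, 0],
                            [0, 0, 0, 1],
                            [0, 0, 0, 1],
                            [0, 0, 0, 0]])
    print(ComputeVertexChannels(8, 16, demo_matrix))  # -> [8, 8, 8, 16]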
```
#### File: custom_modules/nas_unet/unet_cell.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class DownSC(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Not implemented in the original source; placeholder cell.
        pass


class UpSC(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Not implemented in the original source; placeholder cell.
        pass
```
#### File: custom_modules/nsga_net/nsga_net_phase.py
```python
from .nsga_net_node import *
class DensePhase(Module):
pass
class ResidualPhase(Module):
def __init__(self,
supernet,
encoder,
in_channels,
out_channels,
kernel_size,
idx,
preact=False):
super(ResidualPhase, self).__init__()
self.channel_flag = in_channels != out_channels
self.first_conv = nn.Conv2d(in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=False)
self.dependency_graph = ResidualPhase.build_dependency_graph(encoder)
node_type = 'res' if not preact else 'res_pre'
nodes = []
for i in range(len(encoder)):
if len(self.dependency_graph[i+1]) > 0:
nodes.append(supernet.module_dict[''])
    @staticmethod
    def build_dependency_graph(encoder):
        # Not implemented in the original source.
        pass
def forward(self, x):
if self.channel_flag:
x = self.first_conv(x)
```
#### File: optimizer/EA/ea_agent.py
```python
import pymoo.factory as factory
from pymoo.core.repair import NoRepair
from pymoo.core.duplicate import NoDuplicateElimination, DefaultDuplicateElimination
# from pymoo.model.repair import NoRepair
# from pymoo.model.duplicate import NoDuplicateElimination, DefaultDuplicateElimination
from optimizer.EA.base import AgentBase
import torch
import copy
import procedure.problem as problem
import procedure.operator.duplicate as duplicate
import procedure.operator.repair as repair
import os
from util.prepare_seed import prepare_seed
class EvoAgent(AgentBase):
def __init__(self, cfg, seed=0, **kwargs):
super().__init__(cfg, **kwargs)
self.seed = seed
self.cfg = cfg
op_kwargs = self.__build_model_operators(cfg.operators)
self.model_ori = factory.get_algorithm(
name=cfg.algorithm.name,
**cfg.algorithm.kwargs,
**op_kwargs
)
self.model = None
def _initialize(self, **kwargs):
prepare_seed(self.seed)
self.model = copy.deepcopy(self.model_ori)
try:
problem = factory.get_problem(
self.cfg.problem.name,
**self.cfg.problem.kwargs
)
except:
problem = eval(self.cfg.problem.name)(
**self.cfg.problem.kwargs
)
termination = factory.get_termination(
self.cfg.termination.name,
**self.cfg.termination.kwargs
)
self.model.setup(
problem,
termination,
seed=self.seed,
save_history=False
)
if 'checkpoint' in self.config:
self._load_checkpoint(f=self.config.checkpoint)
def _load_checkpoint(self, **kwargs):
try:
ckp = super()._load_checkpoint(torch, cmd=None, **kwargs)
except:
self.logger.warn('Checkpoint not found, proceed algorithm from scratch!')
return
self.model = ckp['model']
self.cfg = ckp['cfg']
def __build_model_operators(self, cfg):
op_dict = {
'repair': NoRepair(),
'eliminate_duplicates': NoDuplicateElimination()
}
op2ctor = {
'sampling': factory.get_sampling,
'crossover': factory.get_crossover,
'mutation': factory.get_mutation,
'ref_dirs': factory.get_reference_directions
}
for key, val in cfg.items():
try:
op_dict[key] = op2ctor[key](val.name, **val.kwargs)
except Exception as e:
op_dict[key] = eval(val.name)(**val.kwargs)
return op_dict
def _finalize(self, **kwargs):
result = self.model.result()
torch.save(result, f=os.path.join(self.config.out_dir, 'result.pth.tar'))
```
#### File: EA/util/callback_handler.py
```python
import logging
class CallbackHandler:
def __init__(self, callbacks=None, summary_writer=None) -> None:
self.summary_writer = summary_writer
self.callbacks = callbacks if callbacks else []
self.logger = logging.getLogger(self.__class__.__name__)
self.msg = 'gen {}, n_eval {}: {}'
self.agent = None
def begin_fit(self, agent, **kwargs):
self.agent = agent
msgs = []
for callback in self.callbacks:
msg = callback._begin_fit(
agent=agent,
callbacks=self.callbacks,
summary_writer=self.summary_writer,
**kwargs
)
if msg:
msgs += [msg]
if len(msgs) > 0:
self.logger.info(self.msg.format(
self.agent.model.n_gen,
self.agent.model.evaluator.n_eval,
str(msgs)
))
def after_fit(self, **kwargs):
msgs = []
for callback in self.callbacks:
msg = callback._after_fit(**kwargs)
if msg:
msgs += [msg]
if len(msgs) > 0:
self.logger.info(self.msg.format(
self.agent.model.n_gen,
self.agent.model.evaluator.n_eval,
str(msgs)
))
def begin_next(self, **kwargs):
msgs = []
for callback in self.callbacks:
msg = callback._begin_next(**kwargs)
if msg:
msgs += [msg]
if len(msgs) > 0:
self.logger.info(self.msg.format(
self.agent.model.n_gen,
self.agent.model.evaluator.n_eval,
str(msgs)
))
def after_next(self, **kwargs):
msgs = []
for callback in self.callbacks:
msg = callback._after_next(**kwargs)
if msg:
msgs += [msg]
if len(msgs) > 0:
self.logger.info(self.msg.format(
self.agent.model.n_gen,
self.agent.model.evaluator.n_eval,
str(msgs)
))
```
#### File: problem/base/base.py
```python
from abc import abstractmethod
from typing import OrderedDict
from pymoo.core.problem import ElementwiseProblem
from util.MOEA.elitist_archive import ElitistArchive
import numpy as np
import logging
class NAS(ElementwiseProblem):
def __init__(self,
pf_dict=None,
pf_path=None,
verbose=True,
filter_duplicate_by_key=True,
**kwargs):
# super().__init__(elementwise_evaluation=True, **kwargs)
super().__init__(**kwargs)
self.verbose = verbose
self.logger = logging.getLogger(self.__class__.__name__)
self.history = OrderedDict({
'eval': OrderedDict(),
'runtime': OrderedDict()
})
self.archive = {}
self.elitist_archive = ElitistArchive(self.archive, verbose, filter_duplicate_by_key=filter_duplicate_by_key)
self.msg = '[{:0>2d}/{:0>2d}]: time={:.3f}s, '
self.counter = 0
self.pf_path = pf_path
self.pf_dict = pf_dict
def _evaluate(self, x, out, algorithm, *args, **kwargs):
self.counter += 1
genotype = self._decode(x)
key = tuple(x.tolist())
if key in self.history['eval']:
out['F'] = self.history['eval'][key]
self.elitist_archive.insert(x, out['F'], key)
self.logger.info('Re-evaluated arch: {}'.format(key))
return
F, runtime = self._calc_F(genotype)
out['F'] = np.column_stack(F)
if self.verbose:
count = self.counter % algorithm.pop_size
self.logger.info(self.msg.format(
algorithm.pop_size if count == 0 else count,
algorithm.pop_size,
runtime,
*F
))
self.history['eval'][key] = out['F']
n_gen = algorithm.n_gen
if n_gen not in self.history['runtime']:
self.history['runtime'][n_gen] = []
self.history['runtime'][n_gen] += [runtime]
self.elitist_archive.insert(x, out['F'], key)
def _convert_to_pf_space(self, X, **kwargs):
pass
@abstractmethod
def _decode(self, **kwargs):
raise NotImplementedError
def _calc_F(self, genotype, **kwargs):
raise NotImplementedError
def _calc_pareto_front(self, *args, **kwargs):
pf = np.load(self.pf_path)
return pf
```
#### File: problem/base/bench101.py
```python
from os import path
from procedure.problem.base import base
import numpy as np
import torch
from lib.api.bench101.api import NASBench, ModelSpec
import os
from os.path import expanduser
class Bench101(base.NAS):
INPUT = 'input'
OUTPUT = 'output'
CONV3X3 = 'conv3x3-bn-relu'
CONV1X1 = 'conv1x1-bn-relu'
MAXPOOL3X3 = 'maxpool3x3'
NUM_VERTICES = 7
ALLOWED_OPS = [CONV3X3, CONV1X1, MAXPOOL3X3]
EDGE_SPOTS = NUM_VERTICES * (NUM_VERTICES - 1) // 2 # Upper triangular matrix
OP_SPOTS = NUM_VERTICES - 2 # Input/output vertices are fixed
def __init__(self,
path,
net_cfg,
epoch=36,
**kwargs):
edge_ub = np.ones(self.EDGE_SPOTS)
edge_lwb = np.zeros(self.EDGE_SPOTS)
op_ub = np.ones(self.OP_SPOTS) * max(range(len(self.ALLOWED_OPS)))
op_lwb = np.zeros(self.OP_SPOTS)
super().__init__(
n_var=self.EDGE_SPOTS+self.OP_SPOTS,
xl=np.concatenate([edge_lwb, op_lwb]),
xu=np.concatenate([edge_ub, op_ub]),
**kwargs
)
self.net_cfg = net_cfg
self.epoch = epoch
self.path = path
if '~' in path:
self.path = os.path.join(expanduser('~'), path[2:])
self.api = NASBench(self.path)
def __getstate__(self):
state_dict = dict(self.__dict__)
del state_dict['api']
return state_dict
def __setstate__(self, state_dict):
self.__dict__ = state_dict
self.api = NASBench(self.path)
def _decode(self, x):
dag, ops = np.split(x, [self.EDGE_SPOTS])
matrix = np.zeros((self.NUM_VERTICES, self.NUM_VERTICES))
iu = np.triu_indices(self.NUM_VERTICES, 1)
matrix[iu] = dag
ops = np.array(self.ALLOWED_OPS)[ops.astype(np.int)].tolist()
return matrix.astype(np.int), [self.INPUT] + ops + [self.OUTPUT]
```
#### File: util/MOEA/elitist_archive.py
```python
from pymoo.util.nds.non_dominated_sorting import find_non_dominated
import numpy as np
import logging
class ElitistArchive:
def __init__(self, archive, verbose=True, filter_duplicate_by_key=True) -> None:
self.archive = archive
self.verbose = verbose
self.logger = logging.getLogger(self.__class__.__name__)
self.filter_duplicate_by_key = filter_duplicate_by_key
def get(self, key):
return self.archive[key]
def __acceptance_test(self, f, key):
if len(self.archive) == 0:
return True
elif not self.__is_duplicate(f, key) and\
len(find_non_dominated(f, self.archive['F'])) > 0:
return True
else:
return False
def __is_duplicate(self, f, key):
if self.filter_duplicate_by_key:
return key in self.archive['keys']
else:
return f.tolist() in self.archive['F'].tolist()
def insert(self, x, f, key):
if self.__acceptance_test(f, key):
if len(self.archive) == 0:
self.archive.update({
'X': x,
'F': f,
'keys': [key]
})
else:
keys = np.row_stack([self.archive['keys'], key])
X = np.row_stack([self.archive['X'], x])
F = np.row_stack([self.archive['F'], f])
I = find_non_dominated(F, F)
self.archive.update({
'X': X[I],
'F': F[I],
'keys': keys[I].tolist()
})
if self.verbose:
self.logger.info('Current archive size: {}'.format(len(self.archive['F'])))
return True
return False
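
# Illustrative usage sketch (added for clarity; not part of the original file):
if __name__ == '__main__':  # pragma: no cover
    archive = ElitistArchive({}, verbose=False)
    # Two mutually non-dominated points are both kept ...
    archive.insert(np.array([0]), np.array([[1.0, 2.0]]), key=(0,))
    archive.insert(np.array([1]), np.array([[2.0, 1.0]]), key=(1,))
    # ... while a point dominated by both is rejected.
    accepted = archive.insert(np.array([2]), np.array([[3.0, 3.0]]), key=(2,))
    print(len(archive.archive['F']), accepted)  # expected: 2 False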
``` |
{
"source": "2Dsharp/college",
"score": 2
} |
#### File: Processing/sketch_3DLighting/sketch_3DLighting.pyde
```python
ry = 0
def setup():
size(800, 800, P3D)
global obj, texture1
texture1 = loadImage("texture.jpg")
obj = loadShape("man.obj")
def draw():
global ry
background(0)
lights()
translate(width / 2, height / 2 + 200, -200)
rotateZ(PI)
rotateY(ry)
scale(25)
# Orange point light on the right
pointLight(150, 100, 0, # Color
200, -150, 0) # Position
# Blue directional light from the left
directionalLight(0, 102, 255, # Color
1, 0, 0) # The x-, y-, z-axis direction
# Yellow spotlight from the front
spotLight(255, 255, 109, # Color
0, 40, 200, # Position
0, 10, 5, # Direction
90, 2) # Angle, concentration
    ambientLight(255, 0, 0)
texture(texture1)
shape(obj)
box(100, 100, 200)
ry += 0.02
``` |
{
"source": "2DU/NamuMark-Table-To-MediaWiki",
"score": 3
} |
#### File: 2DU/NamuMark-Table-To-MediaWiki/app.py
```python
from bottle import route, run, error, request
import re
def redirect(data):
return('<meta http-equiv="refresh" content="0;url=' + data + '" />')
def table_p(d, d2):
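# Parses NamuMark table attribute tags (e.g. <table width=...>, <bgcolor=...>, <-N>, <|N>)
# found in the cell prefix `d` and returns MediaWiki-ready attribute strings:
# [table style, row style, cell style, rowspan, colspan].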
alltable = 'style="'
celstyle = 'style="'
rowstyle = 'style="'
row = ''
cel = ''
table_w = re.search("<table\s?width=((?:(?!>).)*)>", d)
table_h = re.search("<table\s?height=((?:(?!>).)*)>", d)
table_a = re.search("<table\s?align=((?:(?!>).)*)>", d)
if(table_w):
alltable += 'width: ' + table_w.groups()[0] + ';'
if(table_h):
alltable += 'height: ' + table_h.groups()[0] + ';'
if(table_a):
if(table_a.groups()[0] == 'right'):
alltable += 'float: right;'
elif(table_a.groups()[0] == 'center'):
alltable += 'margin: auto;'
table_t_a = re.search("<table\s?textalign=((?:(?!>).)*)>", d)
if(table_t_a):
if(table_t_a.groups()[0] == 'right'):
alltable += 'text-align: right;'
elif(table_t_a.groups()[0] == 'center'):
alltable += 'text-align: center;'
row_t_a = re.search("<row\s?textalign=((?:(?!>).)*)>", d)
if(row_t_a):
if(row_t_a.groups()[0] == 'right'):
rowstyle += 'text-align: right;'
elif(row_t_a.groups()[0] == 'center'):
rowstyle += 'text-align: center;'
else:
rowstyle += 'text-align: left;'
table_cel = re.search("<-((?:(?!>).)*)>", d)
if(table_cel):
cel = 'colspan="' + table_cel.groups()[0] + '"'
else:
cel = 'colspan="' + str(round(len(d2) / 2)) + '"'
table_row = re.search("<\|((?:(?!>).)*)>", d)
if(table_row):
row = 'rowspan="' + table_row.groups()[0] + '"'
row_bgcolor_1 = re.search("<rowbgcolor=(#[0-9a-fA-F]{6})>", d)
row_bgcolor_2 = re.search("<rowbgcolor=(#[0-9a-fA-F]{3})>", d)
row_bgcolor_3 = re.search("<rowbgcolor=(\w+)>", d)
if(row_bgcolor_1):
rowstyle += 'background: ' + row_bgcolor_1.groups()[0] + ';'
elif(row_bgcolor_2):
rowstyle += 'background: ' + row_bgcolor_2.groups()[0] + ';'
elif(row_bgcolor_3):
rowstyle += 'background: ' + row_bgcolor_3.groups()[0] + ';'
table_border_1 = re.search("<table\s?bordercolor=(#[0-9a-fA-F]{6})>", d)
table_border_2 = re.search("<table\s?bordercolor=(#[0-9a-fA-F]{3})>", d)
table_border_3 = re.search("<table\s?bordercolor=(\w+)>", d)
if(table_border_1):
alltable += 'border: ' + table_border_1.groups()[0] + ' 2px solid;'
elif(table_border_2):
alltable += 'border: ' + table_border_2.groups()[0] + ' 2px solid;'
elif(table_border_3):
alltable += 'border: ' + table_border_3.groups()[0] + ' 2px solid;'
table_bgcolor_1 = re.search("<table\s?bgcolor=(#[0-9a-fA-F]{6})>", d)
table_bgcolor_2 = re.search("<table\s?bgcolor=(#[0-9a-fA-F]{3})>", d)
table_bgcolor_3 = re.search("<table\s?bgcolor=(\w+)>", d)
if(table_bgcolor_1):
alltable += 'background: ' + table_bgcolor_1.groups()[0] + ';'
elif(table_bgcolor_2):
alltable += 'background: ' + table_bgcolor_2.groups()[0] + ';'
elif(table_bgcolor_3):
alltable += 'background: ' + table_bgcolor_3.groups()[0] + ';'
bgcolor_1 = re.search("<bgcolor=(#[0-9a-fA-F]{6})>", d)
bgcolor_2 = re.search("<bgcolor=(#[0-9a-fA-F]{3})>", d)
bgcolor_3 = re.search("<bgcolor=(\w+)>", d)
if(bgcolor_1):
celstyle += 'background: ' + bgcolor_1.groups()[0] + ';'
elif(bgcolor_2):
celstyle += 'background: ' + bgcolor_2.groups()[0] + ';'
elif(bgcolor_3):
celstyle += 'background: ' + bgcolor_3.groups()[0] + ';'
st_bgcolor_1 = re.search("<(#[0-9a-fA-F]{6})>", d)
st_bgcolor_2 = re.search("<(#[0-9a-fA-F]{3})>", d)
st_bgcolor_3 = re.search("<(\w+)>", d)
if(st_bgcolor_1):
celstyle += 'background: ' + st_bgcolor_1.groups()[0] + ';'
elif(st_bgcolor_2):
celstyle += 'background: ' + st_bgcolor_2.groups()[0] + ';'
elif(st_bgcolor_3):
celstyle += 'background: ' + st_bgcolor_3.groups()[0] + ';'
n_width = re.search("<width=((?:(?!>).)*)>", d)
n_height = re.search("<height=((?:(?!>).)*)>", d)
if(n_width):
celstyle += 'width: ' + n_width.groups()[0] + ';'
if(n_height):
celstyle += 'height: ' + n_height.groups()[0] + ';'
text_right = re.search("<\)>", d)
text_center = re.search("<:>", d)
text_left = re.search("<\(>", d)
if(text_right):
celstyle += 'text-align: right;'
elif(text_center):
celstyle += 'text-align: center;'
elif(text_left):
celstyle += 'text-align: left;'
alltable += '"'
celstyle += '"'
rowstyle += '"'
return([alltable, rowstyle, celstyle, row, cel])
def namumark(data):
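# Conversion pipeline: escape raw HTML, merge multi-line cells into single logical rows,
# then repeatedly rewrite NamuMark "||"-delimited tables into MediaWiki {| ... |} syntax.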
data = re.sub('<', '&lt;', data)
data = re.sub('>', '&gt;', data)
data = re.sub('"', '&quot;', data)
data = re.sub("(?:\|\|\r\n)", "#table#<tablenobr>", data)
while(1):
y = re.search("(\|\|(?:(?:(?:(?:(?!\|\|).)*)(?:\n?))+))", data)
if(y):
a = y.groups()
mid_data = re.sub("\|\|", "#table#", a[0])
mid_data = re.sub("\r\n", "<br>", mid_data)
data = re.sub("(\|\|((?:(?:(?:(?!\|\|).)*)(?:\n?))+))", mid_data, data, 1)
else:
break
data = re.sub("#table#", "||", data)
data = re.sub("<tablenobr>", "\r\n", data)
while(1):
m = re.search("(\|\|(?:(?:(?:.*)\n?)\|\|)+)", data)
if(m):
results = m.groups()
table = results[0]
while(1):
a = re.search("^(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", table)
if(a):
row = ''
cel = ''
celstyle = ''
rowstyle = ''
alltable = ''
table_d = ''
result = a.groups()
if(result[1]):
table_d = table_p(result[1], result[0])
alltable = table_d[0]
rowstyle = table_d[1]
celstyle = table_d[2]
row = table_d[3]
cel = table_d[4]
table = re.sub("^(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", "{| class='wikitable' " + alltable + "\n|- " + rowstyle + "\n| " + cel + " " + row + " " + celstyle + " | ", table, 1)
else:
cel = 'colspan="' + str(round(len(result[0]) / 2)) + '"'
table = re.sub("^(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", "{| class='wikitable'\n| " + cel + " | ", table, 1)
else:
break
table = re.sub("\|\|$", "</td> \
</tr> \
</tbody> \
</table>", table)
while(1):
b = re.search("\|\|\r\n(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", table)
if(b):
row = ''
cel = ''
celstyle = ''
rowstyle = ''
table_d = ''
result = b.groups()
if(result[1]):
table_d = table_p(result[1], result[0])
rowstyle = table_d[1]
celstyle = table_d[2]
row = table_d[3]
cel = table_d[4]
table = re.sub("\|\|\r\n(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", "\n|- " + rowstyle + "\n| " + cel + " " + row + " " + celstyle + " | ", table, 1)
else:
cel = 'colspan="' + str(round(len(result[0]) / 2)) + '"'
table = re.sub("\|\|\r\n(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", "\n|-\n| " + cel + " | ", table, 1)
else:
break
while(1):
c = re.search("(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", table)
if(c):
row = ''
cel = ''
celstyle = ''
table_d = ''
result = c.groups()
if(result[1]):
table_d = table_p(result[1], result[0])
celstyle = table_d[2]
row = table_d[3]
cel = table_d[4]
table = re.sub("(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", "\n| " + cel + " " + row + " " + celstyle + " | ", table, 1)
else:
cel = 'colspan="' + str(round(len(result[0]) / 2)) + '"'
table = re.sub("(\|\|(?:(?:\|\|)+)?)((?:<(?:(?:(?!>).)*)>)+)?", "\n| " + cel + " | ", table, 1)
else:
break
table += '\n|}'
data = re.sub("(\|\|(?:(?:(?:.*)\n?)\|\|)+)", table, data, 1)
else:
break
data = re.sub("(\n<nobr>|<nobr>\n|<nobr>)", "", data)
data = re.sub('\n', '<br>', data)
return(data)
@route('/', method=['POST', 'GET'])
def start():
if(request.method == 'POST'):
data = '<html> \
<body> \
<a href="https://github.com/2DU/NamuMark-Table-To-MediaWiki">깃 허브</a> <a href="http://namu.ml/w/온마크">문법</a> \
<br> \
<form action="/" method="POST"> \
<textarea style="width: 100%; height: 500px;" name="data">' + request.POST.data + '</textarea> \
<br> \
<input value="변환" type="submit"> \
</form> \
<br> \
' + namumark(request.POST.data) + ' \
</body> \
</html>'
else:
data = '<html> \
<body> \
<a href="https://github.com/2DU/NamuMark-Table-To-MediaWiki">깃 허브</a> <a href="http://namu.ml/w/온마크">문법</a> \
<br> \
<form action="/" method="POST"> \
<textarea style="width: 100%; height: 500px;" name="data"></textarea> \
<br> \
<input value="변환" type="submit"> \
</form> \
</body> \
</html>'
return(data)
@error(404)
def error_404(error):
return(redirect('/'))
run(
host = '0.0.0.0',
server = 'tornado',
port = 3000
)
``` |
{
"source": "2DU/openNAMU-PYnamu",
"score": 2
} |
#### File: openNAMU-PYnamu/route/main_func_setting_external.py
```python
from .tool.func import *
def main_func_setting_external():
with get_db_connect() as conn:
curs = conn.cursor()
if admin_check() != 1:
return re_error('/ban')
i_list = [
'recaptcha',
'sec_re',
'smtp_server',
'smtp_port',
'smtp_security',
'smtp_email',
'smtp_pass',
'recaptcha_ver',
'oauth_client_id',
'email_have'
]
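# POST writes each submitted field back into the `other` table; GET loads the current
# values (inserting empty defaults for missing rows) and renders the settings form.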
if flask.request.method == 'POST':
for data in i_list:
into_data = flask.request.form.get(data, '')
curs.execute(db_change("update other set data = ? where name = ?"), [into_data, data])
conn.commit()
admin_check(None, 'edit_set (external)')
return redirect('/setting/external')
else:
d_list = []
x = 0
for i in i_list:
curs.execute(db_change('select data from other where name = ?'), [i])
sql_d = curs.fetchall()
if sql_d:
d_list += [sql_d[0][0]]
else:
curs.execute(db_change('insert into other (name, data) values (?, ?)'), [i, ''])
d_list += ['']
x += 1
conn.commit()
security_radios = ''
for i in ['tls', 'starttls', 'plain']:
if d_list[4] == i:
security_radios = '<option value="' + i + '">' + i + '</option>' + security_radios
else:
security_radios += '<option value="' + i + '">' + i + '</option>'
re_ver_list = {
'' : 'reCAPTCHA v2',
'v3' : 'reCAPTCHA v3',
'h' : 'hCAPTCHA'
}
re_ver = ''
for i in re_ver_list:
if d_list[7] == i:
re_ver = '<option value="' + i + '">' + re_ver_list[i] + '</option>' + re_ver
else:
re_ver += '<option value="' + i + '">' + re_ver_list[i] + '</option>'
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('ext_api_req_set'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<form method="post" id="main_set_data">
<h2>1. ''' + load_lang('captcha') + '''</h2>
<a href="https://www.google.com/recaptcha/">(''' + load_lang('recaptcha') + ''')</a> <a href="https://www.hcaptcha.com/">(''' + load_lang('hcaptcha') + ''')</a>
<hr class="main_hr">
<span>''' + load_lang('public_key') + '''</span>
<hr class="main_hr">
<input name="recaptcha" value="''' + html.escape(d_list[0]) + '''">
<hr class="main_hr">
<span>''' + load_lang('secret_key') + '''</span>
<hr class="main_hr">
<input name="sec_re" value="''' + html.escape(d_list[1]) + '''">
<hr class="main_hr">
<span>''' + load_lang('version') + '''</span>
<hr class="main_hr">
<select name="recaptcha_ver">
''' + re_ver + '''
</select>
<h2>2. ''' + load_lang('email_setting') + '''</h2>
<input type="checkbox" name="email_have" ''' + ('checked' if d_list[9] != '' else '') + '''> ''' + \
load_lang('email_required') + '''
<h2>2.1. ''' + load_lang('smtp_setting') + '''</h2>
<a href="https://support.google.com/mail/answer/7126229">(Google)</a>
<hr class="main_hr">
<span>''' + load_lang('smtp_server') + '''</span>
<hr class="main_hr">
<input name="smtp_server" value="''' + html.escape(d_list[2]) + '''">
<hr class="main_hr">
<span>''' + load_lang('smtp_port') + '''</span>
<hr class="main_hr">
<input name="smtp_port" value="''' + html.escape(d_list[3]) + '''">
<hr class="main_hr">
<span>''' + load_lang('smtp_security') + '''</span>
<hr class="main_hr">
<select name="smtp_security">
''' + security_radios + '''
</select>
<hr class="main_hr">
<span>''' + load_lang('smtp_username') + '''</span>
<hr class="main_hr">
<input name="smtp_email" value="''' + html.escape(d_list[5]) + '''">
<hr class="main_hr">
<span>''' + load_lang('smtp_password') + '''</span>
<hr class="main_hr">
<input type="password" name="smtp_pass" value="''' + html.escape(d_list[6]) + '''">
<h2>3. ''' + load_lang('oauth') + ''' (''' + load_lang('not_working') + ''')</h2>
<a href="https://developers.google.com/identity/protocols/oauth2">(Google)</a>
<hr class="main_hr">
<span>''' + load_lang('oauth_client_id') + '''</span>
<hr class="main_hr">
<input name="oauth_client_id" value="''' + html.escape(d_list[8]) + '''">
<hr class="main_hr">
<hr class="main_hr">
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
<script>simple_render('main_set_data');</script>
''',
menu = [['setting', load_lang('return')]]
))
```
#### File: openNAMU-PYnamu/route/main_func_setting_head.py
```python
from .tool.func import *
def main_func_setting_head(num, skin_name = ''):
with get_db_connect() as conn:
curs = conn.cursor()
if admin_check() != 1:
return re_error('/ban')
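# num selects which HTML fragment is edited: 4 -> top of body, 7 -> bottom of body,
# anything else -> the <head> section, optionally scoped to a single skin via skin_name.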
if flask.request.method == 'POST':
if num == 4:
info_d = 'body'
end_r = 'body/top'
coverage = ''
elif num == 7:
info_d = 'bottom_body'
end_r = 'body/bottom'
coverage = ''
else:
info_d = 'head'
end_r = 'head'
if skin_name == '':
coverage = ''
else:
coverage = skin_name
curs.execute(db_change("select name from other where name = ? and coverage = ?"), [info_d, coverage])
if curs.fetchall():
curs.execute(db_change("update other set data = ? where name = ? and coverage = ?"), [
flask.request.form.get('content', ''),
info_d,
coverage
])
else:
curs.execute(db_change("insert into other (name, data, coverage) values (?, ?, ?)"), [info_d, flask.request.form.get('content', ''), coverage])
conn.commit()
admin_check(None, 'edit_set (' + info_d + ')')
if skin_name == '':
return redirect('/setting/' + end_r)
else:
return redirect('/setting/' + end_r + '/' + skin_name)
else:
if num == 4:
curs.execute(db_change("select data from other where name = 'body'"))
title = '_body'
start = ''
plus = '''
<button id="preview" type="button" onclick="load_raw_preview(\'content\', \'see_preview\')">''' + load_lang('preview') + '''</button>
<hr class="main_hr">
<div id="see_preview"></div>
'''
elif num == 7:
curs.execute(db_change("select data from other where name = 'bottom_body'"))
title = '_bottom_body'
start = ''
plus = '''
<button id="preview" type="button" onclick="load_raw_preview(\'content\', \'see_preview\')">''' + load_lang('preview') + '''</button>
<hr class="main_hr">
<div id="see_preview"></div>
'''
else:
curs.execute(db_change("select data from other where name = 'head' and coverage = ?"), [skin_name])
title = '_head'
start = '' + \
'<a href="?">(' + load_lang('all') + ')</a> ' + \
' '.join(['<a href="/setting/head/' + i + '">(' + i + ')</a>' for i in load_skin('', 1)]) + '''
<hr class="main_hr">
<span><style>CSS</style><br><script>JS</script></span>
<hr class="main_hr">
'''
plus = ''
head = curs.fetchall()
if head:
data = head[0][0]
else:
data = ''
if skin_name != '':
sub_plus = ' (' + skin_name + ')'
else:
sub_plus = ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang(data = 'main' + title, safe = 1), wiki_set(), wiki_custom(), wiki_css(['(HTML)' + sub_plus, 0])],
data = '''
<form method="post">
''' + start + '''
<textarea rows="25" placeholder="''' + load_lang('enter_html') + '''" name="content" id="content">''' + html.escape(data) + '''</textarea>
<hr class="main_hr">
<button id="save" type="submit">''' + load_lang('save') + '''</button>
''' + plus + '''
</form>
''',
menu = [['setting', load_lang('return')]]
))
```
#### File: openNAMU-PYnamu/route/main_func_setting_main.py
```python
from .tool.func import *
def main_func_setting_main(db_set):
with get_db_connect() as conn:
curs = conn.cursor()
if admin_check() != 1:
return re_error('/ban')
setting_list = {
0 : ['name', 'Wiki'],
2 : ['frontpage', 'FrontPage'],
4 : ['upload', '2'],
5 : ['skin', ''],
7 : ['reg', ''],
8 : ['ip_view', ''],
9 : ['back_up', '0'],
10 : ['port', '3000'],
11 : ['key', load_random_key()],
12 : ['update', 'stable'],
15 : ['encode', 'sha3'],
16 : ['host', '0.0.0.0'],
19 : ['slow_edit', '0'],
20 : ['requires_approval', ''],
21 : ['backup_where', ''],
22 : ['domain', flask.request.host],
23 : ['ua_get', ''],
24 : ['enable_comment', ''],
25 : ['enable_challenge', ''],
26 : ['edit_bottom_compulsion', ''],
27 : ['http_select', 'http'],
28 : ['title_max_length', ''],
29 : ['title_topic_max_length', '']
}
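# Each entry maps an index to [setting name in the `other` table, default value];
# the numeric keys are referenced below when rendering the form fields.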
if flask.request.method == 'POST':
for i in setting_list:
curs.execute(db_change("update other set data = ? where name = ?"), [
flask.request.form.get(setting_list[i][0], setting_list[i][1]),
setting_list[i][0]
])
conn.commit()
admin_check(None, 'edit_set (main)')
return redirect('/setting/main')
else:
d_list = {}
for i in setting_list:
curs.execute(db_change('select data from other where name = ?'), [setting_list[i][0]])
db_data = curs.fetchall()
if not db_data:
curs.execute(db_change('insert into other (name, data) values (?, ?)'), [setting_list[i][0], setting_list[i][1]])
d_list[i] = db_data[0][0] if db_data else setting_list[i][1]
else:
conn.commit()
encode_select = ''
encode_select_data = ['sha256', 'sha3']
for encode_select_one in encode_select_data:
if encode_select_one == d_list[15]:
encode_select = '<option value="' + encode_select_one + '">' + encode_select_one + '</option>' + encode_select
else:
encode_select += '<option value="' + encode_select_one + '">' + encode_select_one + '</option>'
tls_select = ''
tls_select_data = ['http', 'https']
for tls_select_one in tls_select_data:
if tls_select_one == d_list[27]:
tls_select = '<option value="' + tls_select_one + '">' + tls_select_one + '</option>' + tls_select
else:
tls_select += '<option value="' + tls_select_one + '">' + tls_select_one + '</option>'
check_box_div = ['', '', '', '', '', '', '', '']
for i in range(0, len(check_box_div)):
if i == 0:
acl_num = 7
elif i == 1:
acl_num = 8
elif i == 3:
acl_num = 20
elif i == 4:
acl_num = 23
elif i == 5:
acl_num = 24
elif i == 6:
acl_num = 25
elif i == 7:
acl_num = 26
if d_list[acl_num]:
check_box_div[i] = 'checked="checked"'
branch_div = ''
branch_list = ['stable', 'dev', 'beta']
for i in branch_list:
if d_list[12] == i:
branch_div = '<option value="' + i + '">' + i + '</option>' + branch_div
else:
branch_div += '<option value="' + i + '">' + i + '</option>'
sqlite_only = 'style="display:none;"' if db_set != 'sqlite' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('main_setting'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<form method="post" id="main_set_data">
<h2>1. ''' + load_lang('basic_set') + '''</h2>
<span>''' + load_lang('wiki_name') + '''</span>
<hr class="main_hr">
<input name="name" value="''' + html.escape(d_list[0]) + '''">
<hr class="main_hr">
<span><a href="/setting/main/logo">(''' + load_lang('wiki_logo') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('main_page') + '''</span>
<hr class="main_hr">
<input name="frontpage" value="''' + html.escape(d_list[2]) + '''">
<hr class="main_hr">
<span>''' + load_lang('tls_method') + '''</span>
<hr class="main_hr">
<select name="http_select">''' + tls_select + '''</select>
<hr class="main_hr">
<span>''' + load_lang('domain') + '''</span> (EX : 2du.pythonanywhere.com)
<hr class="main_hr">
<input name="domain" value="''' + html.escape(d_list[22]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_host') + '''</span>
<hr class="main_hr">
<input name="host" value="''' + html.escape(d_list[16]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_port') + '''</span>
<hr class="main_hr">
<input name="port" value="''' + html.escape(d_list[10]) + '''">
<hr class="main_hr">
<span>''' + load_lang('wiki_secret_key') + '''</span>
<hr class="main_hr">
<input type="password" name="key" value="''' + html.escape(d_list[11]) + '''">
<hr class="main_hr">
<span>''' + load_lang('encryption_method') + '''</span>
<hr class="main_hr">
<select name="encode">''' + encode_select + '''</select>
<h3>1.1. ''' + load_lang('communication_set') + '''</h3>
<input type="checkbox" name="enable_comment" ''' + check_box_div[5] + '''> ''' + load_lang('enable_comment_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<input type="checkbox" name="enable_challenge" ''' + check_box_div[6] + '''> ''' + load_lang('enable_challenge_function') + ''' (''' + load_lang('not_working') + ''')
<hr class="main_hr">
<h2>2. ''' + load_lang('design_set') + '''</h2>
<span>''' + load_lang('wiki_skin') + '''</span>
<hr class="main_hr">
<select name="skin">''' + load_skin(d_list[5] if d_list[5] != '' else 'tenshi') + '''</select>
<h2>3. ''' + load_lang('login_set') + '''</h2>
<input type="checkbox" name="reg" ''' + check_box_div[0] + '''> ''' + load_lang('no_register') + '''
<hr class="main_hr">
<input type="checkbox" name="ip_view" ''' + check_box_div[1] + '''> ''' + load_lang('hide_ip') + '''
<hr class="main_hr">
<input type="checkbox" name="requires_approval" ''' + check_box_div[3] + '''> ''' + load_lang('requires_approval') + '''
<hr class="main_hr">
<input type="checkbox" name="ua_get" ''' + check_box_div[4] + '''> ''' + load_lang('ua_get_off') + '''
<h2>4. ''' + load_lang('server_set') + '''</h2>
<span>''' + load_lang('max_file_size') + ''' (MB)</span>
<hr class="main_hr">
<input name="upload" value="''' + html.escape(d_list[4]) + '''">
<hr class="main_hr">
<span>''' + load_lang('update_branch') + '''</span>
<hr class="main_hr">
<select name="update">''' + branch_div + '''</select>
<span ''' + sqlite_only + '''>
<h3>4.1. ''' + load_lang('sqlite_only') + '''</h3>
<span>
''' + load_lang('backup_interval') + ' (' + load_lang('hour') + ') (' + load_lang('off') + ' : 0) ' + \
'(' + load_lang('restart_required') + ''')</span>
<hr class="main_hr">
<input name="back_up" value="''' + html.escape(d_list[9]) + '''">
<hr class="main_hr">
<span>
''' + load_lang('backup_where') + ' (' + load_lang('empty') + ' : ' + load_lang('default') + ') ' + \
'(' + load_lang('restart_required') + ''') (''' + load_lang('example') + ''' : ./data/backup.db)
</span>
<hr class="main_hr">
<input name="backup_where" value="''' + html.escape(d_list[21]) + '''">
<hr class="main_hr">
</span>
<h2>5. ''' + load_lang('edit_set') + '''</h2>
<span><a href="/setting/acl">(''' + load_lang('main_acl_setting') + ''')</a></span>
<hr class="main_hr">
<span>''' + load_lang('slow_edit') + ' (' + load_lang('second') + ') (' + load_lang('off') + ''' : 0)</span>
<hr class="main_hr">
<input name="slow_edit" value="''' + html.escape(d_list[19]) + '''">
<hr class="main_hr">
<input type="checkbox" name="edit_bottom_compulsion" ''' + check_box_div[7] + '''> ''' + load_lang('edit_bottom_compulsion') + ''' (''' + load_lang('beta') + ''')
<hr class="main_hr">
<span>''' + load_lang('title_max_length') + ''' (''' + load_lang('beta') + ''')</span>
<hr class="main_hr">
<input name="title_max_length" value="''' + html.escape(d_list[28]) + '''">
<hr class="main_hr">
<span>''' + load_lang('title_topic_max_length') + ''' (''' + load_lang('not_working') + ''')</span>
<hr class="main_hr">
<input name="title_topic_max_length" value="''' + html.escape(d_list[29]) + '''">
<hr class="main_hr">
<hr class="main_hr">
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
<script>simple_render('main_set_data');</script>
''',
menu = [['setting', load_lang('return')]]
))
```
#### File: openNAMU-PYnamu/route/main_func_setting.py
```python
from .tool.func import *
def main_func_setting():
li_list = [
['main', load_lang('main_setting')],
['phrase', load_lang('text_setting')],
['robot', 'robots.txt'],
['external', load_lang('ext_api_req_set')],
['head', load_lang('main_head')],
['body/top', load_lang('main_body')],
['body/bottom', load_lang('main_bottom_body')]
]
li_data = ''.join(['<li><a href="/setting/' + str(li[0]) + '">' + li[1] + '</a></li>' for li in li_list])
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('setting'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '<h2>' + load_lang('list') + '</h2><ul class="inside_ul">' + li_data + '</ul>',
menu = [['manager', load_lang('return')]]
))
```
#### File: openNAMU-PYnamu/route/main_search_goto.py
```python
from .tool.func import *
def main_search_goto(name = 'Test'):
with get_db_connect() as conn:
curs = conn.cursor()
if flask.request.form.get('search', None):
data = flask.request.form.get('search', 'Test')
else:
data = name
curs.execute(db_change("select title from data where title = ?"), [data])
t_data = curs.fetchall()
if t_data:
return redirect('/w/' + url_pas(data))
else:
return redirect('/search/' + url_pas(data))
```
#### File: route/tool/func_render_namumark.py
```python
from .func_tool import *
class class_do_render_namumark:
def __init__(
self,
curs,
doc_name,
doc_data,
doc_include
):
self.curs = curs
self.doc_data = doc_data
self.doc_name = doc_name
self.doc_include = doc_include
self.data_nowiki = {}
self.data_backlink = []
self.data_toc = ''
self.data_footnote = ''
self.data_category = ''
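# Rendering happens in two passes: do_render_text handles inline markup (bold, italic,
# underline, sup/sub, strikethrough) via regex substitution, and do_render_last strips the
# <front_br>/<back_br> placeholders and turns newlines into <br>.
# __call__ returns [rendered HTML, JS, extra data].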
def do_render_text(self):
# <b>
self.render_data = re.sub(
r"'''((?:(?!''').)+)'''",
'<b>\g<1></b>',
self.render_data
)
# <i>
self.render_data = re.sub(
r"''((?:(?!'').)+)''",
'<i>\g<1></i>',
self.render_data
)
# <u>
self.render_data = re.sub(
r"__((?:(?!__).)+)__",
'<u>\g<1></u>',
self.render_data
)
# <sup>
self.render_data = re.sub(
r"\^\^\^((?:(?!\^\^\^).)+)\^\^\^",
'<sup>\g<1></sup>',
self.render_data
)
# <sup> 2
self.render_data = re.sub(
r"\^\^((?:(?!\^\^).)+)\^\^",
'<sup>\g<1></sup>',
self.render_data
)
# <sub>
self.render_data = re.sub(
r",,,((?:(?!,,,).)+),,,",
'<sub>\g<1></sub>',
self.render_data
)
# <sub> 2
self.render_data = re.sub(
r",,((?:(?!,,).)+),,",
'<sub>\g<1></sub>',
self.render_data
)
# <s>
self.render_data = re.sub(
r"--((?:(?!--).)+)--",
'<s>\g<1></s>',
self.render_data
)
# <s> 2
self.render_data = re.sub(
r"~~((?:(?!~~).)+)~~",
'<s>\g<1></s>',
self.render_data
)
def do_render_last(self):
# remove front_br and back_br
self.render_data = re.sub(
r'\n<front_br>',
'',
self.render_data
)
self.render_data = re.sub(
r'<back_br>\n',
'',
self.render_data
)
# \n to <br>
self.render_data = re.sub(
r'\n',
'<br>',
self.render_data
)
def __call__(self):
self.render_data = html.escape(self.doc_data)
self.render_data_js = ''
self.do_render_text()
self.do_render_last()
return [
self.render_data, # HTML
self.render_data_js, # JS
[] # Other
]
```
#### File: openNAMU-PYnamu/route/user_info.py
```python
from .tool.func import *
def user_info(name = ''):
with get_db_connect() as conn:
curs = conn.cursor()
if name == '':
ip = ip_check()
else:
ip = name
login_menu = ''
tool_menu = ''
if name == '':
curs.execute(db_change("select count(*) from alarm where name = ?"), [ip])
count = curs.fetchall()
if count and count[0][0] != 0:
tool_menu += '<li><a id="not_thing" href="/alarm">' + load_lang('alarm') + ' (' + str(count[0][0]) + ')</a></li>'
else:
tool_menu += '<li><a href="/alarm">' + load_lang('alarm') + '</a></li>'
if ip_or_user(ip) == 0:
login_menu += '''
<li><a href="/logout">''' + load_lang('logout') + '''</a></li>
<li><a href="/change">''' + load_lang('user_setting') + '''</a></li>
'''
tool_menu += '<li><a href="/watch_list">' + load_lang('watchlist') + '</a></li>'
tool_menu += '<li><a href="/star_doc">' + load_lang('star_doc') + '</a></li>'
tool_menu += '<li><a href="/challenge">' + load_lang('challenge') + '</a></li>'
tool_menu += '<li><a href="/acl/user:' + url_pas(ip) + '">' + load_lang('user_document_acl') + '</a></li>'
else:
login_menu += '''
<li><a href="/login">''' + load_lang('login') + '''</a></li>
<li><a href="/register">''' + load_lang('register') + '''</a></li>
<li><a href="/change">''' + load_lang('user_setting') + '''</a></li>
<li><a href="/login/find">''' + load_lang('password_search') + '''</a></li>
'''
tool_menu += '<li><a href="/change/head">' + load_lang('user_head') + '</a></li>'
login_menu = '<h2>' + load_lang('login') + '</h2><ul class="inside_ul">' + login_menu + '</ul>'
tool_menu = '<h2>' + load_lang('tool') + '</h2><ul class="inside_ul">' + tool_menu + '</ul>'
if admin_check(1) == 1:
curs.execute(db_change("select block from rb where block = ? and ongoing = '1'"), [ip])
ban_name = load_lang('release') if curs.fetchall() else load_lang('ban')
admin_menu = '''
<h2>''' + load_lang('admin') + '''</h2>
<ul class="inside_ul">
<li><a href="/ban/''' + url_pas(ip) + '''">''' + ban_name + '''</a></li>
<li><a href="/check/''' + url_pas(ip) + '''">''' + load_lang('check') + '''</a></li>
</ul>
'''
else:
admin_menu = ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('user_tool'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<h2>''' + load_lang('state') + '''</h2>
<div id="get_user_info"></div>
<script>load_user_info("''' + ip + '''");</script>
''' + login_menu + '''
''' + tool_menu + '''
<h2>''' + load_lang('other') + '''</h2>
<ul class="inside_ul">
<li><a href="/record/''' + url_pas(ip) + '''">''' + load_lang('record') + '''</a></li>
<li><a href="/record/topic/''' + url_pas(ip) + '''">''' + load_lang('discussion_record') + '''</a></li>
<li><a href="/topic/user:''' + url_pas(ip) + '''">''' + load_lang('user_discussion') + '''</a></li>
<li><a href="/count/''' + url_pas(ip) + '''">''' + load_lang('count') + '''</a></li>
</ul>
''' + admin_menu + '''
''',
menu = 0
))
``` |
{
"source": "2du/opennamu",
"score": 3
} |
#### File: opennamu/route/give_acl.py
```python
from .tool.func import *
def give_acl_2(conn, name):
curs = conn.cursor()
check_ok = ''
ip = ip_check()
if flask.request.method == 'POST':
check_data = 'acl (' + name + ')'
else:
check_data = None
user_data = re.search(r'^user:(.+)$', name)
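# Permission check: the ACL of a "user:<name>" page may be edited by that user or by an
# admin with level-5 authority; other pages always require level-5 authority. Without it,
# a GET renders the form read-only (check_ok = 'disabled') and a POST is rejected.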
if user_data:
if check_data and ip_or_user(ip) != 0:
return redirect('/login')
if user_data.group(1) != ip_check():
if admin_check(5) != 1:
if check_data:
return re_error('/error/3')
else:
check_ok = 'disabled'
else:
if admin_check(5) != 1:
if check_data:
return re_error('/error/3')
else:
check_ok = 'disabled'
if flask.request.method == 'POST':
acl_data = [['decu', flask.request.form.get('decu', '')]]
acl_data += [['dis', flask.request.form.get('dis', '')]]
acl_data += [['view', flask.request.form.get('view', '')]]
acl_data += [['why', flask.request.form.get('why', '')]]
curs.execute(db_change("select title from acl where title = ?"), [name])
if curs.fetchall():
for i in acl_data:
curs.execute(db_change("update acl set data = ? where title = ? and type = ?"), [i[1], name, i[0]])
else:
for i in acl_data:
curs.execute(db_change("insert into acl (title, data, type) values (?, ?, ?)"), [name, i[1], i[0]])
all_d = ''
for i in ['decu', 'dis', 'view']:
if flask.request.form.get(i, '') == '':
all_d += 'normal'
if i != 'view':
all_d += ' | '
else:
all_d += flask.request.form.get(i, '')
if i != 'view':
all_d += ' | '
admin_check(5, check_data + ' (' + all_d + ')')
conn.commit()
return redirect('/acl/' + url_pas(name))
else:
data = ''
acl_list = get_acl_list('user') if re.search(r'^user:', name) else get_acl_list()
if not re.search(r'^user:', name):
acl_get_list = [
[load_lang('document_acl'), 'decu'],
[load_lang('discussion_acl'), 'dis'],
[load_lang('view_acl'), 'view']
]
else:
acl_get_list = [
[load_lang('document_acl'), 'decu']
]
for i in acl_get_list:
data += '' + \
'<h2>' + i[0] + '</h2>' + \
'<hr class="main_hr">' + \
'<select name="' + i[1] + '" ' + check_ok + '>' + \
''
curs.execute(db_change("select data from acl where title = ? and type = ?"), [name, i[1]])
acl_data = curs.fetchall()
for data_list in acl_list:
check = 'selected="selected"' if acl_data and acl_data[0][0] == data_list else ''
data += '<option value="' + data_list + '" ' + check + '>' + (data_list if data_list != '' else 'normal') + '</option>'
data += '</select>'
data += '<hr class="main_hr">'
curs.execute(db_change("select data from acl where title = ? and type = ?"), [name, 'why'])
acl_data = curs.fetchall()
acl_why = html.escape(acl_data[0][0]) if acl_data else ''
data += '' + \
'<hr class="main_hr">' + \
'<input value="' + acl_why + '" placeholder="' + load_lang('why') + '" name="why" type="text" ' + check_ok + '>' + \
''
data += '''
<h2 id="exp">''' + load_lang('explanation') + '''</h2>
<ul class="inside_ul">
<li>normal : ''' + load_lang('unset') + '''</li>
<li>admin : ''' + load_lang('admin_acl') + '''</li>
<li>user : ''' + load_lang('member_acl') + '''</li>
<li>50_edit : ''' + load_lang('50_edit_acl') + '''</li>
<li>all : ''' + load_lang('all_acl') + '''</li>
<li>email : ''' + load_lang('email_acl') + '''</li>
<li>owner : ''' + load_lang('owner_acl') + '''</li>
<li>ban : ''' + load_lang('ban_acl') + '''</li>
<li>before : ''' + load_lang('before_acl') + '''</li>
<li>30_day : ''' + load_lang('30_day_acl') + '''</li>
<li>ban_admin : ''' + load_lang('ban_admin_acl') + '''</li>
<li>not_all : ''' + load_lang('not_all_acl') + '''</li>
</ul>
'''
return easy_minify(flask.render_template(skin_check(),
imp = [name, wiki_set(), wiki_custom(), wiki_css(['(' + load_lang('acl') + ')', 0])],
data = '''
<form method="post">
<a href="/setting/8">(''' + load_lang('main_acl_setting') + ''')</a>
''' + data + '''
<button type="submit" ''' + check_ok + '''>''' + load_lang('save') + '''</button>
</form>
''',
menu = [
['w/' + url_pas(name), load_lang('document')],
['manager', load_lang('admin')],
['admin_log?search=' + url_pas('acl (' + name + ')'), load_lang('acl_record')]
]
))
```
#### File: opennamu/route/user_challenge.py
```python
from .tool.func import *
def do_make_challenge_design(img, title, info, disable = 0):
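# Renders one achievement card; a green border marks a completed challenge (disable = 1),
# a red border marks one still in progress.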
if disable == 1:
table_style = 'style="border: 2px solid green"'
else:
table_style = 'style="border: 2px solid red"'
return '''
<table id="main_table_set" ''' + table_style + '''>
<tr>
<td id="main_table_width_quarter" rowspan="2">
<span style="font-size: 64px;">''' + img + '''</span>
</td>
<td>
<span style="font-size: 32px;">''' + title + '''</span>
</td>
</tr>
<tr>
<td>''' + info + '''</td>
</table>
<hr class="main_hr">
'''
def user_challenge():
ip = ip_check()
if ip_or_user(ip) == 1:
return redirect('/user')
with get_db_connect() as conn:
curs = conn.cursor()
data_html_green = ''
data_html_red = ''
data_html_green += do_make_challenge_design(
'🆕',
load_lang('challenge_title_register'),
load_lang('challenge_info_register'),
1
)
curs.execute(db_change('select count(*) from history where ip = ?'), [ip])
db_data = curs.fetchall()
disable = 1 if db_data[0][0] >= 1 else 0
data_html = do_make_challenge_design(
'✏',
load_lang('challenge_title_first_contribute'),
load_lang('challenge_info_first_contribute'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 10 else 0
data_html = do_make_challenge_design(
'🗊',
load_lang('challenge_title_tenth_contribute'),
load_lang('challenge_info_tenth_contribute'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 100 else 0
data_html = do_make_challenge_design(
'🗀',
load_lang('challenge_title_hundredth_contribute'),
load_lang('challenge_info_hundredth_contribute'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 1000 else 0
data_html = do_make_challenge_design(
'🖪',
load_lang('challenge_title_thousandth_contribute'),
load_lang('challenge_info_thousandth_contribute'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 10000 else 0
data_html = do_make_challenge_design(
'🖴',
load_lang('challenge_title_tenthousandth_contribute'),
load_lang('challenge_info_tenthousandth_contribute'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
curs.execute(db_change("select count(*) from topic where ip = ?"), [ip])
db_data = curs.fetchall()
disable = 1 if db_data[0][0] >= 1 else 0
data_html = do_make_challenge_design(
'🗨',
load_lang('challenge_title_first_discussion'),
load_lang('challenge_info_first_discussion'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 10 else 0
data_html = do_make_challenge_design(
'🗪',
load_lang('challenge_title_tenth_discussion'),
load_lang('challenge_info_tenth_discussion'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 100 else 0
data_html = do_make_challenge_design(
'🖅',
load_lang('challenge_title_hundredth_discussion'),
load_lang('challenge_info_hundredth_discussion'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 1000 else 0
data_html = do_make_challenge_design(
'☏',
load_lang('challenge_title_thousandth_discussion'),
load_lang('challenge_info_thousandth_discussion'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
disable = 1 if db_data[0][0] >= 10000 else 0
data_html = do_make_challenge_design(
'🖧',
load_lang('challenge_title_tenthousandth_discussion'),
load_lang('challenge_info_tenthousandth_discussion'),
disable
)
if disable == 1:
data_html_green += data_html
else:
data_html_red += data_html
data_html = data_html_green + data_html_red
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('challenge'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = data_html,
menu = [['user', load_lang('return')]]
))
``` |
{
"source": "2DU/PYnamu",
"score": 2
} |
#### File: 2DU/PYnamu/app.py
```python
import os
import re
from route.tool.func import *
# from route import *
for i_data in os.listdir("route"):
f_src = re.search(r"(.+)\.py$", i_data)
f_src = f_src.group(1) if f_src else ""
if not f_src in ('', '__init__'):
try:
exec(
"from route." + f_src + " " +
"import " + f_src
)
except:
try:
exec(
"from route." + f_src + " " +
"import " + f_src + "_2"
)
except:
pass
# Init-Version
version_list = json.loads(open(
'version.json',
encoding = 'utf8'
).read())
# Init-DB
data_db_set = class_check_json()
db_data_get(data_db_set['type'])
do_db_set(data_db_set)
load_db = get_db_connect_old(data_db_set)
conn = load_db.db_load()
curs = conn.cursor()
setup_tool = ''
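# setup_tool: 'init' when the version row is missing (fresh install), 'update' when the
# stored DB version is older than the code version, otherwise 'normal'.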
try:
curs.execute(db_change('select data from other where name = "ver"'))
except:
setup_tool = 'init'
if setup_tool != 'init':
ver_set_data = curs.fetchall()
if ver_set_data:
if int(version_list['beta']['c_ver']) > int(ver_set_data[0][0]):
setup_tool = 'update'
else:
setup_tool = 'normal'
else:
setup_tool = 'init'
if setup_tool != 'normal':
# Init-Create_DB
create_data = {}
# To be removed (merged into data_set)
create_data['data_set'] = ['doc_name', 'doc_rev', 'set_name', 'set_data']
create_data['data'] = ['title', 'data', 'type']
create_data['history'] = ['id', 'title', 'data', 'date', 'ip', 'send', 'leng', 'hide', 'type']
create_data['rc'] = ['id', 'title', 'date', 'type']
create_data['acl'] = ['title', 'data', 'type']
# To be reworked (renamed to data_link)
create_data['back'] = ['title', 'link', 'type']
# To be removed (merged into topic_set) [most urgent]
create_data['rd'] = ['title', 'sub', 'code', 'date', 'band', 'stop', 'agree', 'acl']
create_data['topic'] = ['id', 'data', 'date', 'ip', 'block', 'top', 'code']
# To be removed (merged into user_set)
create_data['rb'] = ['block', 'end', 'today', 'blocker', 'why', 'band', 'login', 'ongoing']
create_data['scan'] = ['user', 'title', 'type']
# To be reworked (split into wiki_set, wiki_filter and wiki_vote)
create_data['other'] = ['name', 'data', 'coverage']
create_data['html_filter'] = ['html', 'kind', 'plus', 'plus_t']
create_data['vote'] = ['name', 'id', 'subject', 'data', 'user', 'type', 'acl']
# To be reworked (changed to auth_list and auth_log)
create_data['alist'] = ['name', 'acl']
create_data['re_admin'] = ['who', 'what', 'time']
# To be reworked (changed to user_notice and user_agent)
create_data['alarm'] = ['name', 'data', 'date']
create_data['ua_d'] = ['name', 'ip', 'ua', 'today', 'sub']
create_data['user_set'] = ['name', 'id', 'data']
for create_table in create_data:
for create in ['test'] + create_data[create_table]:
try:
curs.execute(db_change('select ' + create + ' from ' + create_table + ' limit 1'))
except:
try:
curs.execute(db_change('create table ' + create_table + '(test longtext default "")'))
except:
curs.execute(db_change("alter table " + create_table + " add column " + create + " longtext default ''"))
if setup_tool == 'update':
update(int(ver_set_data[0][0]), set_data)
else:
set_init()
set_init_always(version_list['beta']['c_ver'])
# Init-Route
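# Custom URL converters: EverythingConverter accepts any remaining path (including slashes)
# for document titles, while RegexConverter lets individual routes supply their own pattern
# via <regex("..."):name>.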
class EverythingConverter(werkzeug.routing.PathConverter):
regex = '.*?'
class RegexConverter(werkzeug.routing.BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app = flask.Flask(
__name__,
template_folder = './'
)
app.config['JSON_AS_ASCII'] = False
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
log = logging.getLogger('waitress')
log.setLevel(logging.ERROR)
app.jinja_env.filters['md5_replace'] = md5_replace
app.jinja_env.filters['load_lang'] = load_lang
app.jinja_env.filters['cut_100'] = cut_100
app.url_map.converters['everything'] = EverythingConverter
app.url_map.converters['regex'] = RegexConverter
curs.execute(db_change('select data from other where name = "key"'))
sql_data = curs.fetchall()
app.secret_key = sql_data[0][0]
print('----')
# Init-DB_Data
server_set = {}
server_set_var = {
'host' : {
'display' : 'Host',
'require' : 'conv',
'default' : '0.0.0.0'
}, 'port' : {
'display' : 'Port',
'require' : 'conv',
'default' : '3000'
}, 'language' : {
'display' : 'Language',
'require' : 'select',
'default' : 'ko-KR',
'list' : ['ko-KR', 'en-US']
}, 'markup' : {
'display' : 'Markup',
'require' : 'select',
'default' : 'namumark',
'list' : ['namumark', 'markdown', 'custom', 'raw']
}, 'encode' : {
'display' : 'Encryption method',
'require' : 'select',
'default' : 'sha3',
'list' : ['sha3', 'sha256']
}
}
server_set_env = {
'host' : os.getenv('NAMU_HOST'),
'port' : os.getenv('NAMU_PORT'),
'language' : os.getenv('NAMU_LANG'),
'markup' : os.getenv('NAMU_MARKUP'),
'encode' : os.getenv('NAMU_ENCRYPT')
}
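# Each server setting is resolved in order: value stored in the DB, then the NAMU_*
# environment variable, then an interactive prompt falling back to the default.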
for i in server_set_var:
curs.execute(db_change('select data from other where name = ?'), [i])
server_set_val = curs.fetchall()
if server_set_val:
server_set_val = server_set_val[0][0]
elif server_set_env[i] != None:
server_set_val = server_set_env[i]
else:
if 'list' in server_set_var[i]:
print(server_set_var[i]['display'] + ' (' + server_set_var[i]['default'] + ') [' + ', '.join(server_set_var[i]['list']) + ']' + ' : ', end = '')
else:
print(server_set_var[i]['display'] + ' (' + server_set_var[i]['default'] + ') : ', end = '')
server_set_val = input()
if server_set_val == '':
server_set_val = server_set_var[i]['default']
elif server_set_var[i]['require'] == 'select':
if not server_set_val in server_set_var[i]['list']:
server_set_val = server_set_var[i]['default']
curs.execute(db_change('insert into other (name, data) values (?, ?)'), [i, server_set_val])
print(server_set_var[i]['display'] + ' : ' + server_set_val)
server_set[i] = server_set_val
print('----')
# Init-DB_care
if data_db_set['type'] == 'sqlite':
def back_up(back_time, back_up_where):
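# Copies the SQLite database file to back_up_where and reschedules itself with
# threading.Timer so a backup runs every back_time hours.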
print('----')
try:
shutil.copyfile(
data_db_set['name'] + '.db',
back_up_where
)
print('Back up : OK')
except:
print('Back up : Error')
threading.Timer(
60 * 60 * back_time,
back_up,
[back_time, back_up_where]
).start()
curs.execute(db_change('select data from other where name = "back_up"'))
back_time = curs.fetchall()
back_time = int(number_check(back_time[0][0])) if back_time else 0
if back_time != 0:
curs.execute(db_change('select data from other where name = "backup_where"'))
back_up_where = curs.fetchall()
if back_up_where and back_up_where[0][0] != '':
back_up_where = back_up_where[0][0]
else:
back_up_where = 'back_' + data_db_set['name'] + '.db'
print('Back up state : ' + str(back_time) + ' hours')
back_up(back_time, back_up_where)
else:
print('Back up state : Turn off')
print('Now running... http://localhost:' + server_set['port'])
conn.commit()
# Init-custom
if os.path.exists('custom.py'):
from custom import custom_run
custom_run(load_db.db_get(), app)
# Func
# Func-inter_wiki
app.route('/inter_wiki', defaults = { 'tool' : 'inter_wiki' })(filter_inter_wiki)
app.route('/inter_wiki/del/<name>', defaults = { 'tool' : 'del_inter_wiki' })(filter_inter_wiki_delete)
app.route('/inter_wiki/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_inter_wiki' })(filter_inter_wiki_add)
app.route('/inter_wiki/add/<name>', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_inter_wiki' })(filter_inter_wiki_add)
app.route('/filter/document/list')(filter_document)
app.route('/filter/document/add/<name>', methods = ['POST', 'GET'])(filter_document_add)
app.route('/filter/document/add', methods = ['POST', 'GET'])(filter_document_add)
app.route('/filter/document/del/<name>')(filter_document_delete)
app.route('/edit_top', defaults = { 'tool' : 'edit_top' })(filter_inter_wiki)
app.route('/edit_top/del/<name>', defaults = { 'tool' : 'del_edit_top' })(filter_inter_wiki_delete)
app.route('/edit_top/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_edit_top' })(filter_inter_wiki_add)
app.route('/image_license', defaults = { 'tool' : 'image_license' })(filter_inter_wiki)
app.route('/image_license/del/<name>', defaults = { 'tool' : 'del_image_license' })(filter_inter_wiki_delete)
app.route('/image_license/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_image_license' })(filter_inter_wiki_add)
app.route('/edit_filter', defaults = { 'tool' : 'edit_filter' })(filter_inter_wiki)
app.route('/edit_filter/del/<name>', defaults = { 'tool' : 'del_edit_filter' })(filter_inter_wiki_delete)
app.route('/edit_filter/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_edit_filter' })(filter_inter_wiki_add)
app.route('/edit_filter/add/<name>', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_edit_filter' })(filter_inter_wiki_add)
app.route('/email_filter', defaults = { 'tool' : 'email_filter' })(filter_inter_wiki)
app.route('/email_filter/del/<name>', defaults = { 'tool' : 'del_email_filter' })(filter_inter_wiki_delete)
app.route('/email_filter/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_email_filter' })(filter_inter_wiki_add)
app.route('/file_filter', defaults = { 'tool' : 'file_filter' })(filter_inter_wiki)
app.route('/file_filter/del/<name>', defaults = { 'tool' : 'del_file_filter' })(filter_inter_wiki_delete)
app.route('/file_filter/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_file_filter' })(filter_inter_wiki_add)
app.route('/name_filter', defaults = { 'tool' : 'name_filter' })(filter_inter_wiki)
app.route('/name_filter/del/<name>', defaults = { 'tool' : 'del_name_filter' })(filter_inter_wiki_delete)
app.route('/name_filter/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_name_filter' })(filter_inter_wiki_add)
app.route('/extension_filter', defaults = { 'tool' : 'extension_filter' })(filter_inter_wiki)
app.route('/extension_filter/del/<name>', defaults = { 'tool' : 'del_extension_filter' })(filter_inter_wiki_delete)
app.route('/extension_filter/add', methods = ['POST', 'GET'], defaults = { 'tool' : 'plus_extension_filter' })(filter_inter_wiki_add)
# Func-list
# /list/document/old
app.route('/old_page')(list_old_page)
# /list/document/acl
@app.route('/acl_list')
def list_acl():
return list_acl_2(load_db.db_get())
# /list/document/acl/add
@app.route('/acl/<everything:name>', methods = ['POST', 'GET'])
def give_acl(name = None):
return give_acl_2(load_db.db_get(), name)
# /list/document/need
@app.route('/please')
def list_please():
return list_please_2(load_db.db_get())
# /list/document/all
@app.route('/title_index')
def list_title_index():
return list_title_index_2(load_db.db_get())
# /list/document/long
@app.route('/long_page')
def list_long_page():
return list_long_page_2(load_db.db_get(), 'long_page')
# /list/document/short
@app.route('/short_page')
def list_short_page():
return list_long_page_2(load_db.db_get(), 'short_page')
# /list/file
@app.route('/image_file_list')
def list_image_file():
return list_image_file_2(load_db.db_get())
# /list/admin
# /list/admin/list
@app.route('/admin_list')
def list_admin():
return list_admin_2(load_db.db_get())
# /list/admin/auth_use
@app.route('/admin_log', methods = ['POST', 'GET'])
def list_admin_use():
return list_admin_use_2(load_db.db_get())
# /list/user
@app.route('/user_log')
def list_user():
return list_user_2(load_db.db_get())
# /list/user/check
@app.route('/check/<name>')
def give_user_check(name = None):
return give_user_check_2(load_db.db_get(), name)
# /list/user/check/delete
@app.route('/check_delete', methods = ['POST', 'GET'])
def give_user_check_delete():
return give_user_check_delete_2(load_db.db_get())
# Func-auth
# /auth/give
# /auth/give/<name>
@app.route('/admin/<name>', methods = ['POST', 'GET'])
def give_admin(name = None):
return give_admin_2(load_db.db_get(), name)
# /auth/give
# /auth/give/<name>
@app.route('/ban', methods = ['POST', 'GET'])
@app.route('/ban/<name>', methods = ['POST', 'GET'])
def give_user_ban(name = None):
return give_user_ban_2(load_db.db_get(), name)
# /auth/list
@app.route('/admin_group')
def list_admin_group():
return list_admin_group_2(load_db.db_get())
# /auth/list/add/<name>
@app.route('/admin_plus/<name>', methods = ['POST', 'GET'])
def give_admin_groups(name = None):
return give_admin_groups_2(load_db.db_get(), name)
# /auth/list/delete/<name>
@app.route('/delete_admin_group/<name>', methods = ['POST', 'GET'])
def give_delete_admin_group(name = None):
return give_delete_admin_group_2(load_db.db_get(), name)
# /auth/history
# Needs to take the ongoing flag into account
@app.route('/block_log')
@app.route('/block_log/<regex("user"):tool>/<name>')
@app.route('/block_log/<regex("admin"):tool>/<name>')
def recent_block(name = 'Test', tool = 'all'):
return recent_block_2(load_db.db_get(), name, tool)
# Func-history
@app.route('/recent_change')
@app.route('/recent_changes')
def recent_change(name = None):
return recent_change_2(load_db.db_get(), name, '')
@app.route('/record/<name>')
def recent_record(name = None):
return recent_change_2(load_db.db_get(), name, 'record')
@app.route('/history/<everything:name>', methods = ['POST', 'GET'])
def recent_history(name = None):
return recent_change_2(load_db.db_get(), name, 'history')
@app.route('/history/tool/<int(signed = True):rev>/<everything:name>')
def recent_history_tool(name = 'Test', rev = 1):
return recent_history_tool_2(load_db.db_get(), name, rev)
@app.route('/history/delete/<int(signed = True):rev>/<everything:name>', methods = ['POST', 'GET'])
def recent_history_delete(name = 'Test', rev = 1):
return recent_history_delete_2(load_db.db_get(), name, rev)
@app.route('/history/hidden/<int(signed = True):rev>/<everything:name>')
def recent_history_hidden(name = 'Test', rev = 1):
return recent_history_hidden_2(load_db.db_get(), name, rev)
@app.route('/history/send/<int(signed = True):rev>/<everything:name>', methods = ['POST', 'GET'])
def recent_history_send(name = 'Test', rev = 1):
return recent_history_send_2(load_db.db_get(), name, rev)
@app.route('/history/reset/<everything:name>', methods = ['POST', 'GET'])
def recent_history_reset(name = 'Test'):
return recent_history_reset_2(load_db.db_get(), name)
@app.route('/history/add/<everything:name>', methods = ['POST', 'GET'])
def recent_history_add(name = 'Test'):
return recent_history_add_2(load_db.db_get(), name)
@app.route('/record/reset/<name>', methods = ['POST', 'GET'])
def recent_record_reset(name = 'Test'):
return recent_record_reset_2(load_db.db_get(), name)
@app.route('/record/topic/<name>')
def recent_record_topic(name = 'Test'):
return recent_record_topic_2(load_db.db_get(), name)
# Still deciding where this route belongs
@app.route('/app_submit', methods = ['POST', 'GET'])
def recent_app_submit():
return recent_app_submit_2(load_db.db_get())
# Func-search
@app.route('/search', methods=['POST'])
def search():
return search_2(load_db.db_get())
@app.route('/goto', methods=['POST'])
@app.route('/goto/<everything:name>', methods=['POST'])
def search_goto(name = 'test'):
return search_goto_2(load_db.db_get(), name)
@app.route('/search/<everything:name>')
def search_deep(name = 'test'):
return search_deep_2(load_db.db_get(), name)
# Func-view
@app.route('/xref/<everything:name>')
def view_xref(name = 'Test'):
return view_xref_2(load_db.db_get(), name)
@app.route('/xref/this/<everything:name>')
def view_xref_this(name = 'Test'):
return view_xref_2(load_db.db_get(), name, xref_type = '2')
app.route('/raw/<everything:name>')(view_raw_2)
app.route('/raw/<everything:name>/doc_acl', defaults = { 'doc_acl' : 1 })(view_raw_2)
app.route('/raw/<everything:name>/doc_rev/<int:num>')(view_raw_2)
@app.route('/diff/<int(signed = True):num_a>/<int(signed = True):num_b>/<everything:name>')
def view_diff(name = 'Test', num_a = 1, num_b = 1):
return view_diff_2(load_db.db_get(), name, num_a, num_b)
@app.route('/down/<everything:name>')
def view_down(name = None):
return view_down_2(load_db.db_get(), name)
@app.route('/w/<everything:name>/doc_rev/<int(signed = True):doc_rev>')
@app.route('/w/<everything:name>/doc_from/<everything:doc_from>')
@app.route('/w/<everything:name>')
def view_read(name = 'Test', doc_rev = 0, doc_from = ''):
return view_read_2(load_db.db_get(), name, doc_rev, doc_from)
# Func-edit
@app.route('/revert/<everything:name>', methods = ['POST', 'GET'])
def edit_revert(name = None):
return edit_revert_2(load_db.db_get(), name)
app.route('/edit/<everything:name>', methods = ['POST', 'GET'])(edit)
app.route('/edit/<everything:name>/doc_from/<everything:name_load>', methods = ['POST', 'GET'])(edit)
app.route('/edit/<everything:name>/doc_section/<int:section>', methods = ['POST', 'GET'])(edit)
# To be reworked
@app.route('/backlink_reset/<everything:name>')
def edit_backlink_reset(name = 'Test'):
return edit_backlink_reset_2(load_db.db_get(), name)
@app.route('/delete/<everything:name>', methods = ['POST', 'GET'])
def edit_delete(name = None):
return edit_delete_2(load_db.db_get(), name)
@app.route('/delete/doc_file/<everything:name>', methods = ['POST', 'GET'])
def edit_delete_file(name = 'test.jpg'):
return edit_delete_file_2(load_db.db_get(), name)
@app.route('/delete/doc_mutiple', methods = ['POST', 'GET'])
def edit_delete_mutiple():
return edit_delete_mutiple_2(load_db.db_get())
@app.route('/move/<everything:name>', methods = ['POST', 'GET'])
def edit_move(name = None):
return edit_move_2(load_db.db_get(), name)
# Func-topic
@app.route('/recent_discuss')
def recent_discuss():
return recent_discuss_2(load_db.db_get(), 'normal')
@app.route('/recent_discuss/close')
def recent_discuss_close():
return recent_discuss_2(load_db.db_get(), 'close')
@app.route('/recent_discuss/open')
def recent_discuss_open():
return recent_discuss_2(load_db.db_get(), 'open')
app.route('/thread/<int:topic_num>', methods = ['POST', 'GET'])(topic)
app.route('/topic/<everything:name>', methods = ['POST', 'GET'])(topic_list)
app.route('/thread/<int:topic_num>/tool')(topic_tool)
app.route('/thread/<int:topic_num>/setting', methods = ['POST', 'GET'])(topic_tool_setting)
app.route('/thread/<int:topic_num>/acl', methods = ['POST', 'GET'])(topic_tool_acl)
app.route('/thread/<int:topic_num>/delete', methods = ['POST', 'GET'])(topic_tool_delete)
app.route('/thread/<int:topic_num>/change', methods = ['POST', 'GET'])(topic_tool_change)
app.route('/thread/<int:topic_num>/comment/<int:num>/tool')(topic_comment_tool)
app.route('/thread/<int:topic_num>/comment/<int:num>/notice')(topic_comment_notice)
app.route('/thread/<int:topic_num>/comment/<int:num>/blind')(topic_comment_blind)
app.route('/thread/<int:topic_num>/comment/<int:num>/raw')(view_raw_2)
app.route('/thread/<int:topic_num>/comment/<int:num>/delete', methods = ['POST', 'GET'])(topic_comment_delete)
# Func-user
@app.route('/change', methods = ['POST', 'GET'])
def user_setting():
return user_setting_2(load_db.db_get(), server_set_var)
@app.route('/change/email', methods = ['POST', 'GET'])
def user_setting_email():
return user_setting_email_2(load_db.db_get())
app.route('/change/email/delete')(user_setting_email_delete)
@app.route('/change/email/check', methods = ['POST', 'GET'])
def user_setting_email_check():
return user_setting_email_check_2(load_db.db_get())
app.route('/change/key')(user_setting_key)
app.route('/change/key/delete')(user_setting_key_delete)
@app.route('/change/pw', methods = ['POST', 'GET'])
def user_setting_pw_change():
return user_setting_pw_change_2(load_db.db_get())
app.route('/change/head', methods=['GET', 'POST'])(user_setting_head)
app.route('/user')(user_info)
app.route('/user/<name>')(user_info)
app.route('/challenge')(user_challenge)
@app.route('/count')
@app.route('/count/<name>')
def user_count_edit(name = None):
return user_count_edit_2(load_db.db_get(), name)
app.route('/alarm')(user_alarm)
app.route('/alarm/delete')(user_alarm_del)
@app.route('/watch_list')
def user_watch_list():
return user_watch_list_2(load_db.db_get(), 'watch_list')
@app.route('/watch_list/<everything:name>')
def user_watch_list_name(name = 'Test'):
return user_watch_list_name_2(load_db.db_get(), 'watch_list', name)
@app.route('/star_doc')
def user_star_doc():
return user_watch_list_2(load_db.db_get(), 'star_doc')
@app.route('/star_doc/<everything:name>')
def user_star_doc_name(name = 'Test'):
return user_watch_list_name_2(load_db.db_get(), 'star_doc', name)
# Func-login
# To be reworked
# login -> login/2fa -> login/2fa/email with login_id
# register -> register/email -> regiter/email/check with reg_id
# pass_find -> pass_find/email with find_id
@app.route('/login', methods = ['POST', 'GET'])
def login_login():
return login_login_2(load_db.db_get())
@app.route('/login/2fa', methods = ['POST', 'GET'])
def login_login_2fa():
return login_login_2fa_2(load_db.db_get())
@app.route('/register', methods = ['POST', 'GET'])
def login_register():
return login_register_2(load_db.db_get())
@app.route('/register/email', methods = ['POST', 'GET'])
def login_register_email():
return login_register_email_2(load_db.db_get())
@app.route('/register/email/check', methods = ['POST', 'GET'])
def login_register_email_check():
return login_register_email_check_2(load_db.db_get())
@app.route('/register/submit', methods = ['POST', 'GET'])
def login_register_submit():
return login_register_submit_2(load_db.db_get())
app.route('/login/find')(login_find)
app.route('/login/find/key', methods = ['POST', 'GET'])(login_find_key)
app.route('/login/find/email', methods = ['POST', 'GET'], defaults = { 'tool' : 'pass_find' })(login_find_email)
app.route('/login/find/email/check', methods = ['POST', 'GET'], defaults = { 'tool' : 'check_key' })(login_find_email_check)
app.route('/logout')(login_logout)
# Func-vote
app.route('/vote/<int:num>', methods = ['POST', 'GET'])(vote_select)
app.route('/vote/end/<int:num>')(vote_end)
app.route('/vote/close/<int:num>')(vote_close)
app.route('/vote', defaults = { 'list_type' : 'normal' })(vote_list)
app.route('/vote/list', defaults = { 'list_type' : 'normal' })(vote_list)
app.route('/vote/list/<int:num>', defaults = { 'list_type' : 'normal' })(vote_list)
app.route('/vote/list/close', defaults = { 'list_type' : 'close' })(vote_list)
app.route('/vote/list/close/<int:num>', defaults = { 'list_type' : 'close' })(vote_list)
app.route('/vote/add', methods = ['POST', 'GET'])(vote_add)
# Func-api
app.route('/api/w/<everything:name>/doc_tool/<tool>/doc_rev/<int(signed = True):rev>')(api_w)
app.route('/api/w/<everything:name>/doc_tool/<tool>', methods = ['POST', 'GET'])(api_w)
app.route('/api/w/<everything:name>', methods = ['GET', 'POST'])(api_w)
app.route('/api/raw/<everything:name>')(api_raw)
app.route('/api/version', defaults = { 'version_list' : version_list })(api_version)
app.route('/api/skin_info')(api_skin_info)
app.route('/api/skin_info/<name>')(api_skin_info)
app.route('/api/markup')(api_markup)
app.route('/api/user_info/<name>', methods = ['POST', 'GET'])(api_user_info)
app.route('/api/setting/<name>')(api_setting)
app.route('/api/thread/<int:topic_num>/<tool>/<int:num>')(api_topic_sub)
app.route('/api/thread/<int:topic_num>/<tool>')(api_topic_sub)
app.route('/api/thread/<int:topic_num>')(api_topic_sub)
app.route('/api/search/<everything:name>/doc_num/<int:num>/<int:page>')(api_search)
app.route('/api/search/<everything:name>')(api_search)
app.route('/api/recent_change/<int:num>')(api_recent_change)
app.route('/api/recent_change')(api_recent_change)
# recent_changes -> recent_change
app.route('/api/recent_changes')(api_recent_change)
app.route('/api/recent_discuss/<get_type>/<int:num>')(api_recent_discuss)
app.route('/api/recent_discuss/<int:num>')(api_recent_discuss)
app.route('/api/recent_discuss')(api_recent_discuss)
app.route('/api/sha224/<everything:data>', methods = ['POST', 'GET'])(api_sha224)
app.route('/api/title_index')(api_title_index)
app.route('/api/image/<everything:name>', methods = ['POST', 'GET'])(api_image_view)
# Still deciding; this does not really seem to belong in the API area
app.route('/api/sitemap.xml')(api_sitemap)
# Func-main
# General restructuring planned here as well
app.route('/other')(main_tool_other)
app.route('/manager', methods = ['POST', 'GET'])(main_tool_admin)
app.route('/manager/<int:num>', methods = ['POST', 'GET'])(main_tool_admin)
app.route('/manager/<int:num>/<add_2>', methods = ['POST', 'GET'])(main_tool_admin)
app.route('/random')(main_func_random)
app.route('/upload', methods = ['POST', 'GET'])(main_func_upload)
app.route('/setting', defaults = { 'db_set' : data_db_set['type'] })(main_func_setting)
app.route('/setting/<int:num>', methods = ['POST', 'GET'], defaults = { 'db_set' : data_db_set['type'] })(main_func_setting)
app.route('/skin_set')(main_func_skin_set)
app.route('/main_skin_set')(main_func_skin_set)
app.route('/easter_egg.xml')(main_func_easter_egg)
# views -> view
app.route('/view/<everything:name>')(main_view)
app.route('/views/<everything:name>')(main_view)
app.route('/image/<everything:name>')(main_view_image)
# Restructuring being planned
app.route('/<regex("[^.]+\.(?:txt|xml)"):data>')(main_view_file)
app.route('/shutdown', methods = ['POST', 'GET'])(main_sys_shutdown)
app.route('/restart', methods = ['POST', 'GET'])(main_sys_restart)
app.route('/update', methods = ['POST', 'GET'])(main_sys_update)
app.errorhandler(404)(main_error_404)
if __name__ == "__main__":
waitress.serve(
app,
host = server_set['host'],
port = int(server_set['port']),
threads = 1
)
``` |
{
"source": "2dx/moderngl",
"score": 3
} |
#### File: moderngl/examples/heightmap_on_the_fly.py
```python
import numpy as np
from pyrr import Matrix44, Matrix33
import moderngl
from ported._example import Example
class HeightmapOnTheFly(Example):
title = "Heightmap - On the fly"
gl_version = (3, 3)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader="""
#version 330
uniform int dim;
out vec2 uv;
void main() {
// grid position from gl_VertexID normalized
vec2 pos = vec2(gl_VertexID % dim, gl_VertexID / dim) / dim;
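// e.g. with dim = 4, gl_VertexID = 6 maps to grid cell (2, 1), giving pos = (0.5, 0.25)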
gl_Position = vec4(pos, 0.0, 1.0);
}
""",
geometry_shader="""
#version 330
uniform sampler2D heightmap;
uniform mat4 projection;
uniform mat4 modelview;
uniform mat3 normal_matrix;
uniform int dim;
uniform float terrain_size;
out vec2 g_uv;
// out vec3 g_pos;
out vec3 normal;
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
const float scale = 0.5;
const float height = -0.15;
float calculateHeight(float h) {
return h * scale + height;
}
vec3 calculateNormal(vec2 uv, float step, float size) {
float hl = calculateHeight(texture(heightmap, uv + vec2(-step, 0.0)).r);
float hr = calculateHeight(texture(heightmap, uv + vec2(step, 0.0)).r);
float hu = calculateHeight(texture(heightmap, uv + vec2(0.0, step)).r);
float hd = calculateHeight(texture(heightmap, uv + vec2(0.0, -step)).r);
return normalize(vec3(hl - hr, hd - hu, size));
}
void main() {
// width and height of a quad
float size = terrain_size / dim;
// lower left corner of the quad
vec2 pos = gl_in[0].gl_Position.xy * terrain_size - terrain_size / 2.0;
vec2 uv = gl_in[0].gl_Position.xy;
float uv_step = 1.0 / dim;
// Calculate mvp
mat4 mvp = projection * modelview;
// Read heights for each corner
vec2 uv1 = uv + vec2(0.0, uv_step);
float h1 = calculateHeight(texture(heightmap, uv1).r);
vec2 uv2 = uv;
float h2 = calculateHeight(texture(heightmap, uv2).r);
vec2 uv3 = uv + vec2(uv_step, uv_step);
float h3 = calculateHeight(texture(heightmap, uv3).r);
vec2 uv4 = uv + vec2(uv_step, 0.0);
float h4 = calculateHeight(texture(heightmap, uv4).r);
// Upper left
vec4 pos1 = vec4(pos + vec2(0.0, size), h1, 1.0);
gl_Position = mvp * pos1;
g_uv = uv1;
normal = normal_matrix * calculateNormal(uv1, uv_step, size);
// g_pos = (modelview * pos1).xyz;
EmitVertex();
// Lower left
vec4 pos2 = vec4(pos, h2, 1.0);
gl_Position = mvp * pos2;
g_uv = uv2;
normal = normal_matrix * calculateNormal(uv2, uv_step, size);
// g_pos = (modelview * pos2).xyz;
EmitVertex();
// Upper right
vec4 pos3 = vec4(pos + vec2(size, size), h3, 1.0);
gl_Position = mvp * pos3;
g_uv = uv3;
normal = normal_matrix * calculateNormal(uv3, uv_step, size);
// g_pos = (modelview * pos3).xyz;
EmitVertex();
// Lower right
vec4 pos4 = vec4(pos + vec2(size, 0.0), h4, 1.0);
gl_Position = mvp * pos4;
g_uv = uv4;
normal = normal_matrix * calculateNormal(uv4, uv_step, size);
// g_pos = (modelview * pos4).xyz;
EmitVertex();
EndPrimitive();
}
""",
fragment_shader="""
#version 330
uniform sampler2D heightmap;
out vec4 fragColor;
in vec2 g_uv;
// in vec3 g_pos;
in vec3 normal;
void main() {
// vec3 normal = normalize(cross(dFdx(g_pos), dFdy(g_pos)));
float l = abs(dot(vec3(0, 0, 1), normal));
// fragColor = vec4(vec3(texture(heightmap, g_uv).r) * l, 1.0);
// fragColor = vec4(normal * l, 1.0);
fragColor = vec4(vec3(1.0) * l, 1.0);
}
""",
)
self.heightmap = self.load_texture_2d('heightmap_detailed.png')
self.heightmap.repeat_x = False
self.heightmap.repeat_y = False
self.dim = self.heightmap.width
self.vao = self.ctx.vertex_array(self.prog, [])
projection = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0, dtype='f4')
self.prog['projection'].write(projection)
self.prog['dim'] = self.dim - 1
self.prog['terrain_size'] = 1.0
def render(self, time, frame_time):
self.ctx.clear()
self.ctx.enable(moderngl.DEPTH_TEST | moderngl.CULL_FACE)
angle = time * 0.2
lookat = Matrix44.look_at(
(np.cos(angle), np.sin(angle), 0.4),
(0.0, 0.0, 0.0),
(0.0, 0.0, 1.0),
dtype='f4',
)
normal_matrix = Matrix33.from_matrix44(lookat).inverse.transpose()
self.prog['modelview'].write(lookat)
self.prog['normal_matrix'].write(normal_matrix.astype('f4').tobytes())
self.heightmap.use(0)
self.vao.render(moderngl.POINTS, vertices=(self.dim - 1) ** 2)
if __name__ == '__main__':
HeightmapOnTheFly.run()
```
#### File: moderngl/examples/matplotlib_as_texture.py
```python
import io
import numpy as np
from PIL import Image
from basic_colors_and_texture import ColorsAndTexture
import matplotlib
matplotlib.use('svg')
import matplotlib.pyplot as plt
class MatplotlibTexture(ColorsAndTexture):
title = "Matplotlib as Texture"
def __init__(self, **kwargs):
super().__init__(**kwargs)
figure_size = (640, 360)
temp = io.BytesIO()
plt.figure(0, figsize=(figure_size[0] / 72, figure_size[1] / 72))
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
n, bins, patches = plt.hist(x, 50, density=True, facecolor='r', alpha=0.75)
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
plt.savefig(temp, format='raw', dpi=72)
temp.seek(0)
img = Image.frombytes('RGBA', figure_size, temp.read()).transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
self.texture = self.ctx.texture(img.size, 3, img.tobytes())
self.texture.build_mipmaps()
if __name__ == '__main__':
MatplotlibTexture.run()
```
#### File: old-examples/GLUT/03_alpha_blending.py
```python
# import struct
# import sys
# import ModernGL
# from OpenGL.GLUT import (
# GLUT_DEPTH, GLUT_DOUBLE, GLUT_ELAPSED_TIME, GLUT_RGB, GLUT_WINDOW_HEIGHT, GLUT_WINDOW_WIDTH, glutCreateWindow,
# glutDisplayFunc, glutGet, glutIdleFunc, glutInit, glutInitDisplayMode, glutInitWindowSize, glutMainLoop,
# glutSwapBuffers
# )
# glutInit(sys.argv)
# glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
# glutInitWindowSize(800, 600)
# glutCreateWindow(b'Alpha Blending')
# ctx = ModernGL.create_context()
# prog = ctx.program(
# ctx.vertex_shader('''
# #version 330
# in vec2 in_vert;
# in vec4 in_color;
# out vec4 v_color;
# uniform vec2 Scale;
# uniform float Rotation;
# void main() {
# v_color = in_color;
# float r = Rotation * (0.5 + gl_InstanceID * 0.05);
# mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r));
# gl_Position = vec4((rot * in_vert) * Scale, 0.0, 1.0);
# }
# '''),
# ctx.fragment_shader('''
# #version 330
# in vec4 v_color;
# out vec4 f_color;
# void main() {
# f_color = v_color;
# }
# '''),
# ])
# rotation = prog.uniforms['Rotation']
# scale = prog.uniforms['Scale']
# vbo = ctx.buffer(struct.pack(
# '18f',
# 1.0, 0.0,
# 1.0, 0.0, 0.0, 0.5,
# -0.5, 0.86,
# 0.0, 1.0, 0.0, 0.5,
# -0.5, -0.86,
# 0.0, 0.0, 1.0, 0.5,
# ))
# vao = ctx.simple_vertex_array(prog, vbo, ['in_vert', 'in_color'])
# def display():
# width, height = glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT)
# elapsed = glutGet(GLUT_ELAPSED_TIME) / 1000.0
# ctx.clear(0.9, 0.9, 0.9)
# ctx.enable(ModernGL.BLEND)
# scale.value = (height / width * 0.75, 0.75)
# rotation.value = elapsed
# vao.render(instances=10)
# glutSwapBuffers()
# glutDisplayFunc(display)
# glutIdleFunc(display)
# glutMainLoop()
```
#### File: old-examples/GLUT/window_coordinates.py
```python
# import struct
# import sys
# import ModernGL
# from OpenGL.GLUT import (
# GLUT_DEPTH, GLUT_DOUBLE, GLUT_RGB, glutCreateWindow, glutDisplayFunc, glutIdleFunc, glutInit, glutInitDisplayMode,
# glutInitWindowSize, glutMainLoop, glutSwapBuffers
# )
# width, height = 1280, 720
# glutInit(sys.argv)
# glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
# glutInitWindowSize(width, height)
# glutCreateWindow(b'')
# ctx = ModernGL.create_context()
# prog = ctx.program(
# ctx.vertex_shader('''
# #version 330
# uniform vec2 WindowSize;
# in vec2 in_vert;
# in vec3 in_color;
# out vec3 v_color;
# void main() {
# v_color = in_color;
# gl_Position = vec4(in_vert / WindowSize * 2.0, 0.0, 1.0);
# }
# '''),
# ctx.fragment_shader('''
# #version 330
# in vec3 v_color;
# out vec4 f_color;
# void main() {
# f_color = vec4(v_color, 1.0);
# }
# '''),
# ])
# window_size = prog.uniforms['WindowSize']
# vbo = ctx.buffer(struct.pack(
# '15f',
# 0.0, 100.0, 1.0, 0.0, 0.0,
# -86.0, -50.0, 0.0, 1.0, 0.0,
# 86.0, -50.0, 0.0, 0.0, 1.0,
# ))
# vao = ctx.simple_vertex_array(prog, vbo, ['in_vert', 'in_color'])
# def display():
# ctx.viewport = (0, 0, width, height)
# ctx.clear(0.9, 0.9, 0.9)
# ctx.enable(ModernGL.BLEND)
# window_size.value = (width, height)
# vao.render()
# glutSwapBuffers()
# glutDisplayFunc(display)
# glutIdleFunc(display)
# glutMainLoop()
```
#### File: old-examples/pyglet/window_coordinates.py
```python
# import struct
# import ModernGL
# import pyglet
# wnd = pyglet.window.Window(1280, 720)
# ctx = ModernGL.create_context()
# prog = ctx.program(
# ctx.vertex_shader('''
# #version 330
# uniform vec2 WindowSize;
# in vec2 in_vert;
# in vec3 in_color;
# out vec3 v_color;
# void main() {
# v_color = in_color;
# gl_Position = vec4(in_vert / WindowSize * 2.0, 0.0, 1.0);
# }
# '''),
# ctx.fragment_shader('''
# #version 330
# in vec3 v_color;
# out vec4 f_color;
# void main() {
# f_color = vec4(v_color, 1.0);
# }
# '''),
# ])
# window_size = prog.uniforms['WindowSize']
# vbo = ctx.buffer(struct.pack(
# '15f',
# 0.0, 100.0, 1.0, 0.0, 0.0,
# -86.0, -50.0, 0.0, 1.0, 0.0,
# 86.0, -50.0, 0.0, 0.0, 1.0,
# ))
# vao = ctx.simple_vertex_array(prog, vbo, ['in_vert', 'in_color'])
# def update(dt):
# ctx.viewport = (0, 0, wnd.width, wnd.height)
# ctx.clear(0.9, 0.9, 0.9)
# ctx.enable(ModernGL.BLEND)
# window_size.value = (wnd.width, wnd.height)
# vao.render()
# pyglet.clock.schedule_interval(update, 1.0 / 60.0)
# pyglet.app.run()
```
#### File: moderngl/examples/raymarching.py
```python
import numpy as np
from ported._example import Example
class Raymarching(Example):
gl_version = (3, 3)
window_size = (500, 500)
aspect_ratio = 1.0
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.vaos = []
program = self.ctx.program(
vertex_shader=VERTEX_SHADER,
fragment_shader=FRAGMENT_SHADER
)
vertex_data = np.array([
# x, y, z, u, v
-1.0, -1.0, 0.0, 0.0, 0.0,
+1.0, -1.0, 0.0, 1.0, 0.0,
-1.0, +1.0, 0.0, 0.0, 1.0,
+1.0, +1.0, 0.0, 1.0, 1.0,
]).astype(np.float32)
content = [(
self.ctx.buffer(vertex_data),
'3f 2f',
'in_vert', 'in_uv'
)]
idx_data = np.array([
0, 1, 2,
1, 2, 3
]).astype(np.int32)
idx_buffer = self.ctx.buffer(idx_data)
self.vao = self.ctx.vertex_array(program, content, idx_buffer)
self.u_time = program.get("T", 0.0)
def render(self, time: float, frame_time: float):
self.u_time.value = time
self.vao.render()
VERTEX_SHADER = '''
#version 430
in vec3 in_vert;
in vec2 in_uv;
out vec2 v_uv;
void main()
{
gl_Position = vec4(in_vert.xyz, 1.0);
v_uv = in_uv;
}
'''
FRAGMENT_SHADER = '''
#version 430
#define FAR 80.0
#define MARCHING_MINSTEP 0
#define MARCHING_STEPS 128
#define MARCHING_CLAMP 0.000001
#define NRM_OFS 0.001
#define AO_OFS 0.01
#define PI 3.141592
#define FOG_DIST 2.5
#define FOG_DENSITY 0.32
#define FOG_COLOR vec3(0.35, 0.37, 0.42)
layout(location=0) uniform float T;
// in vec2 v_uv: screen space coordinate
in vec2 v_uv;
// out color
out vec4 out_color;
// p: sample position
// r: rotation in Euler angles (radian)
vec3 rotate(vec3 p, vec3 r)
{
vec3 c = cos(r);
vec3 s = sin(r);
mat3 rx = mat3(
1, 0, 0,
0, c.x, -s.x,
0, s.x, c.x
);
mat3 ry = mat3(
c.y, 0, s.y,
0, 1, 0,
-s.y, 0, c.y
);
mat3 rz = mat3(
c.z, -s.z, 0,
s.z, c.z, 0,
0, 0, 1
);
return rz * ry * rx * p;
}
// p: sample position
// t: tiling distance
vec3 tile(vec3 p, vec3 t)
{
return mod(p, t) - 0.5 * t;
}
// p: sample position
// r: radius
float sphere(vec3 p, float r)
{
return length(p) - r;
}
// p: sample position
// b: width, height, length (scalar along x, y, z axis)
float box(vec3 p, vec3 b)
{
return length(max(abs(p) - b, 0.0));
}
// c.x, c.y: offset
// c.z: radius
float cylinder(vec3 p, vec3 c)
{
return length(p.xz - c.xy) - c.z;
}
// a, b: capsule position from - to
// r: radius r
float capsule(vec3 p, vec3 a, vec3 b, float r)
{
vec3 dp = p - a;
vec3 db = b - a;
float h = clamp(dot(dp, db) / dot(db, db), 0.0, 1.0);
return length(dp - db * h) - r;
}
// p: sample position
// c: cylinder c
// b: box b
float clamp_cylinder(vec3 p, vec3 c, vec3 b)
{
return max(cylinder(p, c), box(p, b));
}
// a: primitive a
// b: primitive b
// k: blending amount
float blend(float a, float b, float k)
{
float h = clamp(0.5 + 0.5 * (a - b) / k, 0.0, 1.0);
return mix(a, b, h) - k * h * (1.0 - h);
}
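// (polynomial smooth minimum: e.g. blend(d, d, k) = d - k / 4, so coincident
// surfaces merge smoothly instead of creasing)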
float displace(vec3 p, float m, float s)
{
return sin(p.x * m) * sin(p.y * m) * sin(p.z * m) * s;
}
// world
float sample_world(vec3 p, inout vec3 c)
{
vec3 b_left_pos = p - vec3(-0.8, -0.25, 0.0);
b_left_pos = rotate(b_left_pos, vec3(T, 0.0, 0.0));
float d_box_left = box(b_left_pos, vec3(0.4));
vec3 b_right_pos = p - vec3(+0.8, -0.25, 0.0);
b_right_pos = rotate(b_right_pos, vec3(0.0, 0.0, T));
float d_box_right = box(b_right_pos, vec3(0.4));
vec3 b_up_pos = p - vec3(0.0, 1.05, 0.0);
b_up_pos = rotate(b_up_pos, vec3(0.0, T, 0.0));
float d_box_up = box(b_up_pos, vec3(0.4));
float d_box = FAR;
d_box = min(d_box, d_box_left);
d_box = min(d_box, d_box_right);
d_box = min(d_box, d_box_up);
vec3 s_pos = p - vec3(0.0, 0.2, 0.0);
float d_sphere = sphere(s_pos, 0.65);
float result = blend(d_sphere, d_box, 0.3);
if (result < FAR)
{
c.x = 0.5;
c.y = 0.75;
c.z = 0.25;
}
return result;
}
// o: origin
// r: ray
// c: color
float raymarch(vec3 o, vec3 r, inout vec3 c)
{
float t = 0.0;
vec3 p = vec3(0);
float d = 0.0;
for (int i = MARCHING_MINSTEP; i < MARCHING_STEPS; i++)
{
p = o + r * t;
d = sample_world(p, c);
if (abs(d) < MARCHING_CLAMP)
{
return t;
}
t += d;
}
return FAR;
}
// p: sample surface
vec3 norm(vec3 p)
{
vec2 o = vec2(NRM_OFS, 0.0);
vec3 dump_c = vec3(0);
return normalize(vec3(
sample_world(p + o.xyy, dump_c) - sample_world(p - o.xyy, dump_c),
sample_world(p + o.yxy, dump_c) - sample_world(p - o.yxy, dump_c),
sample_world(p + o.yyx, dump_c) - sample_world(p - o.yyx, dump_c)
));
}
void main()
{
// o: origin
vec3 o = vec3(0.0, 0.5, -6.0);
// r: ray
vec3 r = normalize(vec3(v_uv - vec2(0.5, 0.5), 1.001));
// l: light
vec3 l = normalize(vec3(-0.5, -0.2, 0.1));
// c: albedo
vec3 c = vec3(0.125);
float d = raymarch(o, r, c);
// pixel color
vec3 color = vec3(0);
if (d < FAR)
{
vec3 p = o + r * d;
vec3 n = norm(p);
float lambert = dot(n, l);
lambert = clamp(lambert, 0.1, 1.0);
#define SPEC_COLOR vec3(0.85, 0.75, 0.5)
vec3 h = normalize(o + l);
float ndh = clamp(dot(n, h), 0.0, 1.0);
float ndv = clamp(dot(n, -o), 0.0, 1.0);
float spec = pow((ndh + ndv) + 0.01, 64.0) * 0.25;
color = c * lambert + SPEC_COLOR * spec;
}
// add simple fog
color = mix(FOG_COLOR, color, clamp(pow(FOG_DIST / abs(d), FOG_DENSITY), 0.0, 1.0));
out_color = vec4(color, 1.0);
}
'''
if __name__ == '__main__':
Raymarching.run()
```
#### File: moderngl/examples/tesselation.py
```python
import numpy as np
import moderngl
from ported._example import Example
class Tessellation(Example):
title = "Tessellation"
gl_version = (4, 0)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 400 core
in vec2 in_pos;
void main() { gl_Position = vec4(in_pos, 0.0, 1.0); }
''',
tess_control_shader='''
#version 400 core
layout(vertices = 4) out;
void main() {
// set tesselation levels, TODO compute dynamically
gl_TessLevelOuter[0] = 1;
gl_TessLevelOuter[1] = 32;
// pass through vertex positions
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
''',
tess_evaluation_shader='''
#version 400 core
layout(isolines, fractional_even_spacing, ccw) in;
// compute a point on a bezier curve with the points p0, p1, p2, p3
// the parameter u is in [0, 1] and determines the position on the curve
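// e.g. u = 0.5 weights p0..p3 by 1/8, 3/8, 3/8, 1/8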
vec3 bezier(float u, vec3 p0, vec3 p1, vec3 p2, vec3 p3) {
float B0 = (1.0 - u) * (1.0 - u) * (1.0 - u);
float B1 = 3.0 * (1.0 - u) * (1.0 - u) * u;
float B2 = 3.0 * (1.0 - u) * u * u;
float B3 = u * u * u;
return B0 * p0 + B1 * p1 + B2 * p2 + B3 * p3;
}
void main() {
float u = gl_TessCoord.x;
vec3 p0 = vec3(gl_in[0].gl_Position);
vec3 p1 = vec3(gl_in[1].gl_Position);
vec3 p2 = vec3(gl_in[2].gl_Position);
vec3 p3 = vec3(gl_in[3].gl_Position);
gl_Position = vec4(bezier(u, p0, p1, p2, p3), 1.0);
}
''',
fragment_shader='''
#version 400 core
out vec4 frag_color;
void main() { frag_color = vec4(1.0); }
'''
)
# four vertices define a cubic Bézier curve; has to match the shaders
self.ctx.patch_vertices = 4
self.ctx.line_width = 5.0
vertices = np.array([
[-1.0, 0.0],
[-0.5, 1.0],
[0.5, -1.0],
[1.0, 0.0],
])
vbo = self.ctx.buffer(vertices.astype('f4'))
self.vao = self.ctx.simple_vertex_array(self.prog, vbo, 'in_pos')
def render(self, time, frame_time):
self.ctx.clear(0.2, 0.4, 0.7)
self.vao.render(mode=moderngl.PATCHES)
if __name__ == '__main__':
Tessellation.run()
``` |
{
"source": "2e0byo/apigpio",
"score": 4
} |
#### File: apigpio/apigpio/utils.py
```python
import functools
def Debounce(threshold=100):
"""
Simple debouncing decorator for apigpio callbacks.
Example:
`@Debounce()
def my_cb(gpio, level, tick):
print('gpio cb: {} {} {}'.format(gpio, level, tick))
`
The threshold can be given to the decorator as an argument (in millisec).
This decorator can be used both on function and object's methods.
Warning: as the debouncer uses the tick from pigpio, which wraps around
after approximately 1 hour 12 minutes, you could theoretically miss one
call if your callback is called twice with that interval.
"""
threshold *= 1000
max_tick = 0xFFFFFFFF
class _decorated(object):
def __init__(self, pigpio_cb):
self._fn = pigpio_cb
self.last = 0
self.is_method = False
def __call__(self, *args, **kwargs):
if self.is_method:
tick = args[3]
else:
tick = args[2]
if self.last > tick:
delay = max_tick-self.last + tick
else:
delay = tick - self.last
if delay > threshold:
self._fn(*args, **kwargs)
print('call passed by debouncer {} {} {}'
.format(tick, self.last, threshold))
self.last = tick
else:
print('call filtered out by debouncer {} {} {}'
.format(tick, self.last, threshold))
def __get__(self, instance, type=None):
# this is called when an instance of `_decorated` is used as a class
# attribute, which is the case when decorating a method in a class
self.is_method = True
return functools.partial(self, instance)
return _decorated
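# Usage sketch (no hardware needed; the wrapped callback can simply be called
# with pigpio-style microsecond ticks, so the example below is easy to verify):
#
#   @Debounce(200)
#   def on_edge(gpio, level, tick):
#       print('edge:', gpio, level, tick)
#
#   on_edge(18, 1, 1_000_000)  # passes: 1 s after the initial tick of 0
#   on_edge(18, 0, 1_100_000)  # filtered: only 100 ms after the last accepted call
#   on_edge(18, 1, 1_400_000)  # passes: 400 ms after the last accepted call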
```
#### File: apigpio/samples/gpio_debounce.py
```python
import asyncio
import apigpio
import functools
# This sample demonstrates both writing to gpio and listening to gpio changes.
# It also shows the Debounce decorator, which might be useful when registering
# a callback for a gpio connected to a button, for example.
BT_GPIO = 18
LED_GPIO = 21
class Blinker(object):
"""
Led Blinker
"""
def __init__(self, pi, gpio):
self.pi = pi
self.led_gpio = gpio
self.blink = False
@asyncio.coroutine
def start(self):
self.blink = True
print('Start Blinking')
is_on = True
while self.blink:
if is_on:
yield from self.pi.write(self.led_gpio, apigpio.ON)
else:
yield from self.pi.write(self.led_gpio, apigpio.OFF)
is_on = not is_on
yield from asyncio.sleep(0.2)
yield from self.pi.write(self.led_gpio, apigpio.OFF)
def stop(self):
self.blink = False
def toggle(self):
if not self.blink:
asyncio.ensure_future(self.start())  # asyncio.async() is no longer valid syntax on Python 3.7+
else:
print('Stop Blinking')
self.blink = False
# The Debounce decorator can simply be applied to your callback.
# Optionally, the threshold can be specified in milliseconds: @Debounce(200)
@apigpio.Debounce()
def on_bt(gpio, level, tick, blinker=None):
print('on_input {} {} {}'.format(gpio, level, tick))
blinker.toggle()
@asyncio.coroutine
def subscribe(pi):
yield from pi.set_mode(BT_GPIO, apigpio.INPUT)
yield from pi.set_mode(LED_GPIO, apigpio.OUTPUT)
blinker = Blinker(pi, LED_GPIO)
# functools.partial is useful when your callback requires extra arguments:
cb = functools.partial(on_bt, blinker=blinker)
yield from pi.add_callback(BT_GPIO, edge=apigpio.RISING_EDGE,
func=cb)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
pi = apigpio.Pi(loop)
address = ('192.168.1.3', 8888)
loop.run_until_complete(pi.connect(address))
loop.run_until_complete(subscribe(pi))
loop.run_forever()
```
#### File: apigpio/samples/gpio_notification.py
```python
import asyncio
import apigpio
BT1_GPIO = 18
BT2_GPIO = 23
def on_input(gpio, level, tick):
print('on_input {} {} {}'.format(gpio, level, tick))
@asyncio.coroutine
def subscribe(pi):
yield from pi.set_mode(BT1_GPIO, apigpio.INPUT)
yield from pi.set_mode(BT2_GPIO, apigpio.INPUT)
yield from pi.add_callback(BT1_GPIO, edge=apigpio.RISING_EDGE,
func=on_input)
yield from pi.add_callback(BT2_GPIO, edge=apigpio.RISING_EDGE,
func=on_input)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
pi = apigpio.Pi(loop)
address = ('192.168.1.3', 8888)
loop.run_until_complete(pi.connect(address))
loop.run_until_complete(subscribe(pi))
loop.run_forever()
```
#### File: apigpio/samples/gpio_script.py
```python
import asyncio
import apigpio
LED_GPIO = 21
@asyncio.coroutine
def start(pi, address, gpio):
yield from pi.connect(address)
yield from pi.set_mode(gpio, apigpio.OUTPUT)
# Create the script.
script = 'w {e} 1 mils 500 w {e} 0 mils 500 w {e} 1 mils 500 w {e} 0'\
.format(e=gpio)
sc_id = yield from pi.store_script(script)
# Run the script.
yield from pi.run_script(sc_id)
yield from asyncio.sleep(5)
yield from pi.delete_script(sc_id)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
pi = apigpio.Pi(loop)
address = ('192.168.1.3', 8888)
loop.run_until_complete(start(pi, address, LED_GPIO))
``` |
{
"source": "2e0byo/bib",
"score": 3
} |
#### File: 2e0byo/bib/save-bib.py
```python
import bibtexparser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bparser import BibTexParser
from pathlib import Path
def load_uniq(fn):
with Path(fn).open() as f:
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parsed = bibtexparser.load(f, parser)
seen = {}
parsed.entries = [
seen.setdefault(x["ID"], x) for x in parsed.entries if x["ID"] not in seen
]
return parsed
bibs = {}
for f in Path(".").glob("*.bib"):
bibs[f.stem] = load_uniq(f)
print("")
total = 0
for k, v in bibs.items():
n = len(v.entries)
print(f"{k}: {n} entries")
total += n
print("Total:", total)
print("")
theology_entries = {x["ID"]: x for x in bibs["theology"].entries}
for name, bib in bibs.items():
if name == "theology":
continue
for entry in bib.entries:
if entry["ID"] in theology_entries:
del theology_entries[entry["ID"]]
bibs["theology"].entries = [v for _, v in theology_entries.items()]
total = 0
for k, v in bibs.items():
n = len(v.entries)
print(f"{k}: {n} entries")
total += n
print("Total:", total)
writer = BibTexWriter()
writer.order_entries_by = ("author", "year")
writer.comma_first = True
for name, obj in bibs.items():
with Path(f"{name}.bib").open("w") as f:
f.write(writer.write(obj))
``` |
{
"source": "2e0byo/durham-delivery-bot",
"score": 3
} |
#### File: durham-delivery-bot/durham_delivery_bot/__init__.py
```python
from itertools import chain
from pathlib import Path
from typing import Optional
from .bot import request
from .cart import parse_records
from .log import logger
def format_records(records: list[dict]) -> str:
out = ""
libraries = sorted(set(chain.from_iterable(x["Copies"].keys() for x in records)))
for library in libraries:
out += f"# {library}\n\n"
holdings = [r for r in records if library in r["Copies"].keys()]
for record in sorted(holdings, key=lambda x: x["Copies"][library]["Shelfmark"]):
out += "{} {:>40.40} | {:>15.15}\n".format(
record["Copies"][library]["Shelfmark"],
record["Title"],
record.get("Author", record.get("Other Author", "")),
)
out += "\n"
return out
def categorise(records: list[dict], in_person: list[str]) -> tuple[dict, dict]:
collect = []
reserve = []
for record in records:
sources = record["Copies"].keys()
if any(x in src for src in sources for x in in_person):
collect.append(record)
else:
reserve.append(record)
return collect, reserve
def process(
fn: Path,
in_person: Optional[list[str]],
student_type: str,
reason: str,
delivery_method: str,
out: Optional[Path],
dry_run: bool = False,
):
records = parse_records(fn)
collect, reserve = categorise(records, in_person)
if collect:
logger.info("Books to collect:")
formatted = format_records(collect)
print(formatted)
if out:
with out.open("w") as f:
f.write(formatted)
if reserve:
if dry_run:
logger.info("The following records would be reserved:")
print(format_records(reserve))
else:
logger.info("Reserving books to reserve")
request(reserve)
``` |
{
"source": "2e0byo/durham-directory",
"score": 3
} |
#### File: 2e0byo/durham-directory/verify.py
```python
from pathlib import Path
from csv import DictReader, DictWriter
from durham_directory import QueryOne, QueryError
from argparse import ArgumentParser
def robust_query(name, surname):
try:
return query(oname=name, surname=surname)
except QueryError:
try:
res = query(surname=surname)
if res:
return res
except QueryError:
pass
return query(oname=name)
def verify(record):
print("Verifying", record["Name"], record["Surname"])
try:
email = robust_query(record["Name"], record["Surname"])["Email"]
if email != record["Email"]:
record["new_email"] = email
except QueryError as e:
print("Unable to match:", e)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("CSVFILE", type=Path, help="CSV of records.")
parser.add_argument("--out", type=Path, help="Outfile (optional).")
args = parser.parse_args()
with args.CSVFILE.open() as f:
data = list(DictReader(f))
query = QueryOne()
for record in data:
verify(record)
for record in data:
if record.get("new_email"):
print(
f"Incorrect email for {record['Name']} {record['Surname']}"
f"corrected from {record['Email']} to {record['new_email']}"
)
if args.out:
with args.out.open("w") as f:
writer = DictWriter(f, fieldnames=data[0].keys())
writer.writeheader()
for record in data:
record["Email"] = record.get("new_email", record["Email"])
try:
del record["new_email"]
except KeyError:
pass
writer.writerow(record)
``` |
{
"source": "2e0byo/extract-chant",
"score": 3
} |
#### File: 2e0byo/extract-chant/line_splitter.py
```python
import cv2
import numpy as np
white_bleed = (.5, .5)  # fraction of white to add to selection (above, below)
min_white = 20 # minimum length of white pixels
def read_image(fname):
"""Read image and return image and threshed version for analysis"""
img = cv2.imread(fname)
try:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
except cv2.error:
gray = img.copy()
th, threshed = cv2.threshold(gray, 127, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
return (img, gray, threshed)
def min_area_rect_rotation(threshed):
"""
Find Min area rectangle of all non-zero pixels and return angle to
rotate image to align vertically
"""
pts = cv2.findNonZero(threshed)
(cx, cy), (w, h), ang = cv2.minAreaRect(pts)
if w > h: # rotate to portrait
w, h = h, w
ang += 90
print("angle rect:", ang)
M = cv2.getRotationMatrix2D((cx, cy), ang, 1.0)
return (ang, M)
def rotate_image_and_threshed(M, threshed, img):
"""
Rotate image by ang
"""
rotated = cv2.warpAffine(threshed, M, (img.shape[1], img.shape[0]))
rotated_original = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
return (rotated, rotated_original)
def get_lines(img, th=False):
"""
Get upper and lower boundary of each line in image by stepping
through averaged histogram. Threshold defaults to minimum
value in hist (probably 0).
"""
hist = cv2.reduce(img, 1, cv2.REDUCE_AVG).reshape(-1)
if not th:
th = min(hist)
H, W = img.shape[:2]
uppers_y = [y for y in range(H - 1) if hist[y] <= th and hist[y + 1] > th]
lowers_y = [y for y in range(H - 1) if hist[y] > th and hist[y + 1] <= th]
hist = cv2.reduce(img, 0, cv2.REDUCE_AVG).reshape(-1)
if not th:
th = min(hist)
th = 1
black_width = 100
lower_x = min([
x for x in range(W - black_width)
if hist[x] <= th
and all([hist[x + i + 1] > th for i in range(black_width)])
])
upper_x = max([
x for x in range(W - black_width)
if hist[x] > th
and all([hist[x + i + 1] <= th for i in range(black_width)])
])
if len(lowers_y) < len(uppers_y): # if ends with cut-off line
uppers_y.pop()
return (uppers_y, lowers_y, lower_x, upper_x)
def smarten_lines(uppers_y, lowers_y):
"""
Add appropriate whitespace around lines and combine any which are too small
"""
bands = []
last_band = -1
gaps = []
for i in range(len(uppers_y)):
if i > 0:
gap = uppers_y[i] - lowers_y[i - 1]
gaps.append(gap)
else:
gap = False
if gap is not False and gap < min_white:
bands[last_band][1] = lowers_y[i]
else:
if gap is False:
gap = 0
else:
bands[last_band][1] += round(gap * white_bleed[0])
bands.append(
[uppers_y[i] - round(gap * white_bleed[0]), lowers_y[i]])
last_band += 1
# get mean gap for first/last band
# excluding outliers (1.5*std away from mean)
gaps = np.array(gaps)
mean_gap = np.array(
[i for i in gaps if abs(gaps.mean() - i) < (gaps.std() * 1.5)]).mean()
bands[-1][1] += int(round(mean_gap * white_bleed[1]))
bands[0][0] -= int(round(mean_gap * white_bleed[0]))
if bands[0][0] < 0:
bands[0][0] = 0
return (bands)
``` |
{
"source": "2e0byo/form-site",
"score": 3
} |
#### File: form-site/backend/backend.py
```python
from json import dump, load
from pathlib import Path
from bottle import post, request, route, run, static_file
STATIC = Path(__file__).parent.parent / "static"
DB = Path(__file__).parent.parent / "data.json"
data = []
if DB.exists():
with DB.open("r") as f:
data = load(f)
def db_add(response: dict):
data.append(response)
with DB.open("w") as f:
dump(data, f)
@post("/form")
def form():
db_add(dict(request.forms.items()))
return static_file("thanks.html", root=STATIC)
@route("/")
def homepage():
return static_file("form.html", root=STATIC)
if __name__ == "__main__":
run(host="localhost", port=8225)
``` |
{
"source": "2e0byo/label",
"score": 3
} |
#### File: 2e0byo/label/label.py
```python
from argparse import ArgumentParser
from subprocess import run
from textwrap import wrap
from itertools import chain
LINE_LENGTH = 12
MAX_LINES = 3
PRINT_CMD = "enscript -fCourier-Bold16 --no-header -r".split(" ")
def format_msg(msg, multipage=False):
lines = (wrap(x, LINE_LENGTH) for x in msg.splitlines())
lines = list(chain.from_iterable(lines))
if not multipage and len(lines) > MAX_LINES:
raise ValueError("Too many lines in input to fit on page.")
# centre vertically
while len(lines) < MAX_LINES:
if len(lines) < MAX_LINES:
lines.insert(0, "")
if len(lines) < MAX_LINES:
lines.append("")
return "\n".join("{x:^{len}}".format(x=x, len=LINE_LENGTH) for x in lines)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--multipage", action="store_true")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("MSG", nargs="+")
args = parser.parse_args()
msg = format_msg(" ".join(args.MSG), multipage=args.multipage)
if not args.dry_run:
run(PRINT_CMD, input=msg, encoding="utf8")
else:
print(msg)
``` |
{
"source": "2e0byo/miniature-lighting-desk",
"score": 2
} |
#### File: miniature-lighting-desk/tests/test_server.py
```python
import asyncio
from functools import partial
import pytest
from fastapi_websocket_rpc import RpcMethodsBase, WebSocketRpcClient
from miniature_lighting_desk.test_server import MockServer
async def run_client(uri, method, **kwargs):
async with WebSocketRpcClient(uri, RpcMethodsBase()) as client:
res = await getattr(client.other, method)(**kwargs)
try:
return int(res.result)
except ValueError:
return res.result
@pytest.fixture
def Server():
s = MockServer()
with s.run_in_thread() as cont:
yield cont, partial(run_client, f"ws://localhost:{s.port}{s.endpoint}")
@pytest.mark.asyncio
async def test_init(Server):
server, run_method = Server
assert [await server.get_brightness(channel=i) for i in range(8)] == [0] * 8
@pytest.mark.asyncio
async def test_set(Server):
server, run_method = Server
for i in range(8):
assert await server.get_brightness(channel=i) != 199
await server.set_brightness(channel=i, val=199)
assert await server.get_brightness(channel=i) == 199
@pytest.mark.asyncio
async def test_ping(Server):
server, run_method = Server
assert await run_method("ping") == "hello"
@pytest.mark.asyncio
async def test_rpc(Server):
server, run_method = Server
for i in range(8):
assert await run_method("get_brightness", channel=i) != 199
await run_method("set_brightness", channel=i, val=199)
assert await run_method("get_brightness", channel=i) == 199
``` |
{
"source": "2e0byo/OfficiumDivinum",
"score": 2
} |
#### File: officiumdivinum/objects/datastructures.py
```python
from dataclasses import dataclass
from datetime import datetime
from functools import total_ordering
from typing import List
from typing import Union
import pylunar
from ..DSL import dsl_parser
from .divinumofficium_structures import feria_ranks
from .divinumofficium_structures import latin_feminine_ordinals
from .divinumofficium_structures import new_rank_table
from .divinumofficium_structures import traditional_rank_lookup_table
from .divinumofficium_structures import typo_translations
from .html_render import render_template
"""
Since sometimes we call parsers manually, we enforce only parsing for
calendars for which we have built Calendar() objects, otherwise we
wouldn't know what to do with the generated data.
"""
valid_calendars = [
"1955",
"1960",
] # list of valid calendars. Might need an object list instead. Or perhaps don't check and rely on in
"""Valid types of day."""
valid_day_types = ["de Tempore", "Sanctorum"]
class RankException(Exception):
""""""
Rank = None  # forward-reference placeholder so Octave can annotate its rank before Rank is defined
class Renderable:
"""
Base class for renderable objects.
Derived objects should set their `.template` attribute. Be sure
actually to create the template (in `api/templates`) before calling
the method.
"""
def html(self):
"""Render self as html."""
return render_template(f"html/{self.template}.html", obj=self)
def latex(self):
"""Render self as latex."""
return render_template(f"tex/{self.template}.tex", obj=self)
def gabc(self):
"""Render self as gabc."""
return render_template(f"gabc/{self.template}.gabc", obj=self)
def DSL(self):
"""Render self as dsl."""
@dataclass
class StrObj(Renderable):
content: str
template = "string"
@dataclass
class Hymn(Renderable):
"""Class to represent a hymn."""
name: str
content: List[List[str]]
template = "hymn"
@dataclass
class Antiphon(Renderable):
"""Class to represent an anitphon (for psalms)."""
content: str
template = "string"
@dataclass
class Octave:
"""Class to represent an octave, which may be of various kinds
rank: rank of the octave."""
name: str
privileged: bool = None
rank: Rank = None
@dataclass
class Rank:
"""
Class to represent the rank of a particular feast.
This must be
able to return a machine-readable and sortable object (we use an
integer from 0 with 0 = feria) and also preserve the particular
name we use in any given calendar .
Parameters
----------
Returns
-------
"""
name: str = "Feria"
defeatable: bool = None
octave: Octave = None
def __post_init__(self):
try:
self.name = typo_translations[self.name]
except KeyError:
pass
name = self.name.strip().lower()
if (
name not in traditional_rank_lookup_table.keys()
and name not in new_rank_table
and name not in feria_ranks.keys()
):
raise RankException(f"Rank {self.name} not valid")
def _to_int(self):
""""""
name = self.name.strip().lower()
try:
val = traditional_rank_lookup_table[name]
except KeyError:
try:
val = new_rank_table.index(name)
except ValueError:
return feria_ranks[name]
return val if not self.defeatable else val - 0.1
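# e.g. a defeatable rank that maps to 3 sorts as 2.9, so it yields to an
# otherwise equal non-defeatable rank when Feast objects are compared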
class CalendarException(Exception):
""""""
@dataclass
class Commemoration:
"""
A class to represent a commemoration.
This might be a bit more in depth than we need to go.
Parameters
----------
Returns
-------
"""
name: str
rank: Rank
@dataclass
class Celebration:
"""
A class to represent a celebration.
This might be a bit more in depth than we need to go.
Parameters
----------
Returns
-------
"""
name: str
@dataclass
class Date:
"""
A class to represent a date which may or may not need resolving for a specific year.
Parameters
----------
Returns
-------
"""
rules: str
date: datetime = None
def resolve(self, year: int):
"""
Parameters
----------
year: int :
Returns
-------
"""
self.date = dsl_parser(self.rules, year)
return self.date
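# Hypothetical usage (the rule syntax is whatever ..DSL.dsl_parser accepts,
# e.g. the strings assembled in parsers/T2obj.py):
#   d = Date("<some DSL rule>")
#   d.resolve(2021)  # -> datetime for 2021, also cached on d.date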
@total_ordering
@dataclass
class Feast:
"""
Object to represent a liturgical day.
These are sortable by rank, although sorting objects with distinct
calendars is unsupported and will probably return nonsense.
Multiple Feast objects can meaningfully exist for a given calendar
day, if they have different `Type` s. (I.e. tempore/sanctorum.)
This might be a design decision worth re-thinking down the line.
Parameters
----------
Returns
-------
"""
rank: Rank
date: Union[datetime, Date]
calendar: str
Type: str
name: str = None
celebration: Celebration = None
commemorations: List = None
qualifiers: List = None # for matching things up in DO's weird system
def __post_init__(self):
"""Check constraints."""
self.calendar = str(self.calendar)
if self.calendar not in valid_calendars:
raise CalendarException(
f"Invalid calendar supplied {self.calendar}, "
f"valid are {valid_calendars}"
)
if self.Type not in valid_day_types:
raise CalendarException(
f"Invalid Type supplied {self.Type}, " f"valid are {valid_day_types}"
)
if not self.name and self.celebration:
self.name = self.celebration.name
def __lt__(self, other):
return self.rank._to_int() < other.rank._to_int()
def __eq__(self, other):
return self.rank._to_int() == other.rank._to_int()
@dataclass
class Feria:
"""Class to represent a feria, which can be quite a complicated thing."""
name: str
def _to_int(self):
""""""
return feria_ranks[self.name]
@dataclass
class MartyrologyInfo:
"""Class to represent a martyrology entry which should be appended after the date
and before the content fixed for the day."""
date: Date
content: List
@dataclass
class Martyrology(Renderable):
"""Class to represent the martyrology for a given day."""
date: Date
old_date: str
content: List
ordinals: List = None
moonstr: str = " Luna {ordinal} Anno Domini {year}"
template = "martyrology"
def __post_init__(self):
if not self.ordinals:
self.ordinals = latin_feminine_ordinals
def lunar(self, date: datetime):
"""
Parameters
----------
date: datetime :
Returns
-------
"""
m = pylunar.MoonInfo((31, 46, 19), (35, 13, 1)) # lat/long Jerusalem
m.update((date.year, date.month, date.day, 0, 0, 0))
age = round(m.age())
return age
def render(self, year: int):
"""
Parameters
----------
year: int :
Returns
-------
"""
date = self.date.resolve(year)
ordinal = self.ordinals[self.lunar(date) - 1]
old_date = self.old_date + self.moonstr.format(
ordinal=ordinal.title(), year=year
)
# may need to abstract this to handle translations
return old_date, self.content
def html(self):
self.old_date, self.content = self.render(self.year)
return super().html()
@dataclass
class Verse(Renderable):
"""
Parameters
----------
number: int : Verse number.
chapter: int : Chapter number.
book: str: Book (can be None to indicate 'don't print')
content: str : Verse content.
version: str or None: Version in question (probably not worth storing here).
"""
number: int
chapter: int
book: Union[str, None]
content: str
version: str = None # in case we want it later
template = "verse"
@dataclass
class Reading(Renderable):
""""""
name: str
ref: str
content: List[Union[Verse, StrObj]]
description: str = None
template = "reading"
@dataclass
class Rubric(Renderable):
"""Class to represent a rubric displayed by itself."""
content: str
template = "string"
@dataclass
class Gloria(Renderable):
"""Class to represent a gloria in any language."""
content: List[str]
template = "gloria"
@dataclass
class Psalm(Renderable):
"""Class to represent a psalm."""
content: List[Verse]
ref: str
gloria: Gloria
suppress_gloria: bool = None
template = "psalm"
@dataclass
class Responsory(Renderable):
"""
Class to represent a responsory.
Later we may want to make this more explicit.
Parameters
----------
content: List[tuple]: List of lhs, rhs tuples.
Returns
-------
"""
content: List[tuple]
template = "responsory"
@dataclass
class Incipit(Renderable):
"""Class to represent an incipit.
Parameters
----------
name: str: Name of *an incipit* in the right language.
content: List[Responsory]: list of Responsory objects.
"""
name: str
content: List[Responsory]
template = "incipit"
@dataclass
class Collect(Renderable):
"""Class to represent a collect."""
content: str
termination: str
template = "collect"
@dataclass
class Blessing(Renderable):
"""Class to represent a blessing."""
content: str
template = "blessing"
@dataclass
class Rules:
""""""
source: Feast = None
nocturns: int = None
sunday_psalms: bool = None
antiphons_at_hours: bool = None
proper_hymns: List[str] = None
Te_Deum: bool = None
skip_psalm_93 = None
lessons: int = None
doxology: str = None
feria: bool = None
festum_Domini: bool = None
gloria_responsory: bool = None
vespers_antiphons: List[str] = None
second_chapter_verse: bool = None
second_chapter_verse_lauds: bool = None
commemoration_key: int = None
duplex: bool = None
hymn_terce: bool = None
crossref: Feast = None
one_antiphon: bool = None
athanasian_creed: bool = None
stjamesrule: str = None
psalm_5_vespers: int = None
psalm_5_vespers_3: int = None
initia_cum_responsory: bool = None
invit2: bool = None
laudes2: bool = None
laudes_litania: bool = None
first_lesson_saint: bool = None
limit_benedictiones_oratio: bool = None
minores_sine_antiphona: bool = None
no_commemoration: bool = None
no_suffragium: bool = None
no_commemoration_sunday: bool = None
omit: List = None
sunday_collect: bool = None
preces_feriales: bool = None
proper: bool = None
psalm_53_prime: bool = None
psalmi_minores_dominica: bool = None
```
#### File: officiumdivinum/objects/html_render.py
```python
import flask
def render_template(template, **kwargs):
app = flask.current_app
try:
return flask.render_template(template, **kwargs)
except Exception:
with app.app_context(), app.test_request_context():
return flask.render_template(template, **kwargs)
```
#### File: officiumdivinum/parsers/T2obj.py
```python
import re
from pathlib import Path
from ..DSL import days
from ..DSL import months
from ..DSL import ordinals
from ..DSL import specials
from ..objects import Date
from ..objects import Feast
from ..objects import Octave
from ..objects import Rank
def parse_DO_sections(lines: list) -> list:
"""
Parse DO files into lists per section.
Parameters
----------
lines: list : lines to break into sections.
Returns
-------
A list of sections.
"""
sections = {}
current_section = None
content = []
for line in lines:
line = line.strip()
if line == "_":
continue
if line.startswith("[") and line.endswith("]"):
if current_section:
try:
while content[-1].strip() == "":
content.pop()
except IndexError:
content = None
sections[current_section] = content
current_section = line.strip("[]")
content = []
else:
content.append(line)
return sections
def parse_file(fn: Path, calendar: str) -> Feast:
"""
Parse provided file.
Parameters
----------
fn: Path : File to parse.
calendar : str: Calendar to use (mainly for naming at this stage).
Returns
-------
A Feast object represeting the day.
"""
try:
after, day = fn.stem.split("-")
except ValueError:
return # give up
qualifiers = None
name = None
try:
int(after)
date = after[:-1]
week = int(after[-1])
except ValueError:
date, week = re.findall(r"([A-Z][a-z]+)([0-9]+)", after)[0]
try:
day = int(day)
except ValueError:
day, qualifiers = re.findall(r"([0-9])(.+)", day)[0]
try:
date = f"1 {months[int(date) - 1]}"
except ValueError:
# for non month temporal it the reference *might* be a Sunday (e.g. Easter).
date = specials[date]
datestr = f"{ordinals[int(week)]} Sun after {date}"
# datestr = f"{ordinals[week]} Sun on or after {date}"
day = int(day)
if day != 0:
datestr = f"{days[day]} after {datestr}"
lines = fn.open().readlines()
sections = parse_DO_sections(lines)
rank = "Feria"
try:
name, rank, rankno, source = [
*sections["Rank1960"][0].split(";;"),
None,
None,
None,
None,
][:4]
except KeyError:
try:
name, rank, rankno, source = [
*sections["Rank"][0].split(";;"),
None,
None,
None,
None,
][:4]
except KeyError:
pass
try:
rank, octave = rank.split("cum")
privileged = True if "privilegiata" in octave else False
octave_rank = re.findall(r"Octava ([Pp]rivilegiata)* (.*)", octave)[0][1]
rank = Rank(
rank, octave=Octave(octave, privileged=privileged, rank=Rank(octave_rank))
)
except ValueError:
rank = Rank(rank)
celebration = None
commemorations = None
for section, content in sections.items():
if section == "Rule":
pass
# pass # later implement handling here
return Feast(
rank,
Date(datestr),
calendar,
"de Tempore",
name,
celebration,
commemorations,
qualifiers,
)
``` |
{
"source": "2e0byo/pygallica-autobib",
"score": 3
} |
#### File: 2e0byo/pygallica-autobib/demo_image_processing.py
```python
from bs4 import BeautifulSoup
import numpy as np
from PIL import ImageOps
from gallica_autobib.gallipy import Resource
from gallica_autobib.process import extract_image
from PyPDF4 import PdfFileReader
from io import BytesIO
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.patches import Rectangle
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])
Box = namedtuple("Box", ["upper", "lower"])
ark = "https://gallica.bnf.fr/ark:/12148/bpt6k65545564"
r = Resource(ark)
def fetch_stuff(pno):
pg = r.content_sync(startview=pno, nviews=1, mode="pdf").value
reader = PdfFileReader(BytesIO(pg))
data, type_ = extract_image(reader.getPage(2))
ocr = r.ocr_data_sync(view=pno).value
soup = BeautifulSoup(ocr.decode())
upper_bound = [0, 0]
lower_bound = [0, 0]
page = soup.find("page")
height, width = int(page.get("height")), int(page.get("width"))
xscale = data.height / height
yscale = data.width / width
height *= yscale
printspace = soup.find("printspace")
text_height = round(int(printspace.get("height")) * yscale)
text_width = round(int(printspace.get("width")) * xscale)
vpos = int(printspace.get("vpos")) * yscale
hpos = int(printspace.get("hpos")) * xscale
upper = Point(round(hpos), round(vpos))
return upper, text_height, text_width, data, height
def gen_doc_data():
pno = 128
upper, text_height, text_width, data, height = fetch_stuff(pno)
fig, ax = plt.subplots()
plt.imshow(data)
text_box = ax.add_patch(
Rectangle(
upper, text_width, text_height, edgecolor="red", facecolor="none", lw=2
)
)
fig.savefig(
"docs/img/content_box.svg", bbox_inches="tight", transparent=True, dpi=72
)
ax2 = ax.twiny()
a = np.array(ImageOps.grayscale(data))
mean = a.mean(axis=1)
ax2.plot(mean, range(len(mean)), label="mean")
gradient = np.gradient(mean) + 70
ax2.plot(gradient, range(len(gradient)), color="green", label="differential")
plt.legend()
fig.savefig("docs/img/mean.svg", bbox_inches="tight", transparent=True, dpi=72)
gstd = np.std(gradient)
gmean = gradient.mean()
ax2.vlines([gmean - 1.5 * gstd, gmean + 1.5 * gstd], 0, data.height, color="orange")
fig.savefig(
"docs/img/mean_bounds.svg", bbox_inches="tight", transparent=True, dpi=72
)
search = round(height * 0.05)
upper_bound = upper.y - search
search_height = text_height + 2 * search
search_upper = Point(upper.x, upper_bound)
search_box = ax.add_patch(
Rectangle(
search_upper,
text_width,
search_height,
edgecolor="green",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/search.svg", bbox_inches="tight", transparent=True, dpi=72)
upper_search = gradient[upper_bound : upper.y]
lower_search = gradient[upper.y + text_height : upper_bound + search_height]
lower_thresh = gmean - 1.5 * gstd
upper_thresh = gmean + 1.5 * gstd
peaked = 0
for up, x in enumerate(reversed(upper_search)):
if not peaked and x >= upper_thresh:
peaked = 1
if peaked and x <= lower_thresh:
peaked = 2
print("Line above detected.")
break
up = up if peaked == 2 else 0
peaked = 0
for down, x in enumerate(lower_search):
if not peaked and x <= lower_thresh:
peaked = 1
if peaked and x >= upper_thresh:
peaked = 2
print("Line below detected.")
break
down = down if peaked == 2 else 0
final_upper = Point(upper.x, upper.y - up)
final_height = text_height + up + down
search_box = ax.add_patch(
Rectangle(
final_upper,
text_width,
final_height,
edgecolor="pink",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/searched.svg", bbox_inches="tight", transparent=True, dpi=72)
stretch = round(height * 0.005)
streched_upper = Point(final_upper[0] - stretch, final_upper[1] - 2 * stretch)
stretched_width = text_width + 2 * stretch
stretched_height = final_height + 4 * stretch
fig, ax = plt.subplots()
plt.imshow(data)
final_box = ax.add_patch(
Rectangle(
streched_upper,
stretched_width,
stretched_height,
edgecolor="black",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/stretched.svg", bbox_inches="tight", transparent=True, dpi=72)
def process_page(pno):
upper, text_height, text_width, data, height = fetch_stuff(pno)
fig, ax = plt.subplots()
plt.imshow(data)
text_box = ax.add_patch(
Rectangle(
upper, text_width, text_height, edgecolor="red", facecolor="none", lw=2
)
)
ax2 = ax.twiny()
a = np.array(ImageOps.grayscale(data))
mean = a.mean(axis=1)
gradient = np.gradient(mean) + 70
ax2.plot(gradient, range(len(gradient)), color="green", label="differential")
gstd = np.std(gradient)
gmean = gradient.mean()
ax2.vlines([gmean - 1.5 * gstd, gmean + 1.5 * gstd], 0, data.height, color="orange")
search = round(height * 0.05)
upper_bound = upper.y - search
search_height = text_height + 2 * search
search_upper = Point(upper.x, upper_bound)
search_box = ax.add_patch(
Rectangle(
search_upper,
text_width,
search_height,
edgecolor="green",
facecolor="none",
lw=1,
)
)
upper_search = gradient[upper_bound : upper.y]
lower_search = gradient[upper.y + text_height : upper_bound + search_height]
lower_thresh = gmean - 1.5 * gstd
upper_thresh = gmean + 1.5 * gstd
peaked = 0
for up, x in enumerate(reversed(upper_search)):
if not peaked and x >= upper_thresh:
peaked = 1
if peaked and x <= lower_thresh:
peaked = 2
print("Line above detected.")
break
up = up if peaked == 2 else 0
peaked = 0
for down, x in enumerate(lower_search):
if not peaked and x <= lower_thresh:
peaked = 1
if peaked and x >= upper_thresh:
peaked = 2
print("Line below detected.")
break
down = down if peaked == 2 else 0
final_upper = Point(upper.x, upper.y - up)
final_height = text_height + up + down
search_box = ax.add_patch(
Rectangle(
final_upper,
text_width,
final_height,
edgecolor="pink",
facecolor="none",
lw=1,
)
)
stretch = round(height * 0.005)
streched_upper = Point(final_upper[0] - stretch, final_upper[1] - 2 * stretch)
stretched_width = text_width + 2 * stretch
stretched_height = final_height + 4 * stretch
final_box = ax.add_patch(
Rectangle(
streched_upper,
stretched_width,
stretched_height,
edgecolor="black",
facecolor="none",
lw=1,
)
)
gen_doc_data()
# process_page(128)
# process_page(136)
# process_page(79)
# process_page(136)
```
#### File: pygallica-autobib/tests/test_module.py
```python
import pytest
from gallica_autobib.models import Article, BibBase, Book, Collection, Journal
def test_article():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
)
assert isinstance(a, Article)
assert isinstance(a._source(), Journal)
assert (
a.generate_query()
== 'bib.publicationdate all "1930" and bib.title all "La vie spirituelle" and bib.recordtype all "per"'
)
assert a._source().translate()["title"] == "La vie spirituelle"
assert isinstance(a.pages, list)
assert isinstance(a.pages[0], str)
assert a.name() == "Pour lire saint Augustin (M.-D. Chenu)"
assert a.name(short=4) == "Pour (M.-D)"
ahash = a.__hash__()
assert ahash == hash(a)
a.publicationdate = 1940
assert ahash != hash(a)
def test_book():
a = Book(title="Title", publisher="Cerf", year=1901, author="me")
assert isinstance(a, Book)
assert a._source() is a
assert a._source().translate() == {
k: v for k, v in dict(a).items() if k != "editor"
}
def test_collection():
a = Collection(title="Title", publisher="Cerf", year=1901, author="me")
assert isinstance(a, Collection)
assert a._source() is a
assert a._source().translate() == {
k: v for k, v in dict(a).items() if k != "editor"
}
query_candidates = [
[
{"title": "La vie spirituelle", "recordtype": "per"},
'bib.title all "la vie spirituelle" and bib.recordtype all "per',
],
[{"title": "la vie spirituelle"}, 'bib.title all "la vie spirituelle'],
]
@pytest.mark.parametrize("kwargs,outstr", query_candidates)
def test_assemble_query(kwargs, outstr):
assert BibBase.assemble_query(kwargs=outstr)
def test_bibtex_render_article(file_regression):
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
volume=12,
number=1,
)
file_regression.check(a.bibtex(), extension=".bib")
@pytest.fixture
def article():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
)
yield a
```
#### File: pygallica-autobib/tests/test_parse_gallica.py
```python
from pathlib import Path
import pytest
test_tocs = ["toc-no-cells.xml", "toc-with-cells.xml", "mix.xml"]
@pytest.mark.parametrize("xml", test_tocs)
def test_parse_toc(data_regression, gallica_resource, xml):
with (Path("tests/test_parse_gallica") / xml).open() as f:
data_regression.check(gallica_resource.parse_gallica_toc(f.read().strip()))
```
#### File: pygallica-autobib/tests/test_real_queries.py
```python
import pytest
from gallica_autobib.models import Article
from gallica_autobib.query import Query
def test_match_query():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="Daniélou",
year=1930,
)
q = Query(a)
resp = q.run()
assert resp.target
assert resp.candidate.journaltitle == "La Vie spirituelle, ascétique et mystique"
candidates = [
[
Article(
journaltitle="La Vie spirituelle",
author="<NAME>",
pages=list(range(547, 552)),
volume=7,
year=1923,
title="Ascèse et péché originel",
),
dict(ark="http://catalogue.bnf.fr/ark:/12148/cb34406663m"),
]
]
@pytest.mark.parametrize("candidate,params", candidates)
def test_queries(candidate, params):
q = Query(candidate)
resp = q.run()
assert resp.target
assert resp.candidate.ark == params["ark"]
``` |
{
"source": "2e0byo/typing-game",
"score": 3
} |
#### File: typing-game/typing_game/game.py
```python
import csv
import curses
import sys
from appdirs import AppDirs
from random import choices, randint
from time import sleep
from pathlib import Path
from datetime import datetime
from .score import Score
from .terminal import Terminal
from .timer import PausableMonotonic
from .word import Word
words = ("add", "subtract", "next")
weights = {k: 100 for k in words}
INPUT_LOOP_DELAY = 0.001
class Game:
def __init__(
self,
terminal: Terminal,
user,
dictionary: Path = None,
):
self.timer = PausableMonotonic()
self.running = True
self.terminal = terminal
self.initial_delay = 0.5
self.initial_new_word_pause = 3
self.AppDirs = AppDirs("typing-game", "JohnMorris")
self.user_dir = Path(self.AppDirs.user_data_dir)
self.user_dir.mkdir(exist_ok=True)
self.user = user
self.load_highscores()
if dictionary:
            if dictionary.suffix == ".csv":  # Path.suffix includes the leading dot
self.load_words(dictionary)
else:
self.load_weight_by_length(dictionary)
def load_weight_by_length(self, dictionary: Path):
words, weights = [], {}
with dictionary.open() as f:
for word in f:
word = word.strip()
words.append(word)
weights[word] = max(1, 100 - len(word) * 10)
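                # e.g. a 3-letter word gets weight 70, an 8-letter word 20,
                # and anything of ten letters or more bottoms out at 1, so
                # short words are offered far more often by random.choices().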
self.words = words
self.weights = weights
assert len(words) == len(weights)
@property
def delay(self):
return self.initial_delay / (self.score.level + 1)
@property
def new_word_pause(self):
return self.initial_new_word_pause / (self.score.level + 1)
def draw_word(self):
_, max_x = self.terminal.main_win.getmaxyx()
word = choices(self.words, self.weights.values())[0]
x = randint(0, max_x - len(word) - 1)
word = Word(
word, self.weights[word], self.terminal, x, 0, self.score, self.timer
)
return word
def main(self, stdscr):
"""Main loop."""
self.terminal.stdscr = stdscr
stdscr.clear()
curses.use_default_colors()
for i in range(0, curses.COLORS):
curses.init_pair(i, i, -1)
curses.curs_set(False)
self.score = Score(self.terminal)
self.score.display()
max_y, max_x = self.terminal.main_win.getmaxyx()
max_y -= 1
words = []
selected = False
start = self.timer()
word = None
while self.running:
now = self.timer()
if not words or now - start > self.new_word_pause:
words.append(self.draw_word())
start = now
for w in words:
if w.y:
w.clear()
w.y += 1
w.display()
if w.y == max_y:
                    self.weights[w.word] = w.score(self.timer())
try:
words.remove(w)
if word is w:
selected = False
except ValueError:
pass
if not selected:
word = self.select_word(words)
if not word:
continue
selected = True
if self.input_loop(word):
try:
words.remove(word)
except ValueError:
continue
selected = False
def getch(self):
ch = self.terminal.main_win.getch()
if ch == 27:
self.menu()
return -1
else:
return ch
def select_word(self, words):
"""Get word to type."""
now = start = self.timer()
scr = self.terminal.main_win
while now - start < self.delay:
ch = self.getch()
if ch != -1:
k = chr(ch)
for word in words:
if word.next_char == k:
if not self.score.start_time:
self.score.start_time = now
word.submit(k)
return word
now = self.timer()
return None
def input_loop(self, word: Word):
"""Input loop in game."""
now = start = self.timer()
while now - start < self.delay:
ch = self.getch()
if ch != -1:
ret = word.submit(chr(ch))
word.display()
if ret is not False:
word.display()
if ret:
word.clear()
                        self.weights[word.word] = ret
return True
sleep(INPUT_LOOP_DELAY)
now = self.timer()
return False
def menu(self):
self.timer.pause()
options = {
"r": ("Return to Game", None),
"q": ("Quit", self.quit),
}
win = self.terminal.menu_win
for row, (key, (text, fn)) in enumerate(options.items()):
win.addstr(row + 1, 2, key, curses.A_BOLD)
win.addstr(row + 1, 4, text)
win.addstr(row + 2, 1, str(self.highscoresf))
while True:
key = win.getkey()
entry = options.get(key)
if entry:
fn = entry[1]
if fn:
fn()
else:
break
del win
self.terminal.main_win.touchwin()
self.terminal.main_win.refresh()
self.timer.unpause()
@property
def highscoresf(self):
return self.user_dir / "highscores.csv"
def load_highscores(self):
try:
with self.highscoresf.open("r") as f:
self._highscores = list(csv.DictReader(f))
except Exception:
self._highscores = []
def highscores(self, user):
return [x for x in self._highscores if x["User"] == user]
def save_highscores(self):
with self.highscoresf.open("w") as f:
writer = csv.DictWriter(f, fieldnames=("User", "Date", "Score"))
writer.writeheader()
writer.writerows(self._highscores)
@property
def dict_path(self):
return self.user_dir / "words.csv"
def load_words(self, path: Path = None):
path = path or self.dict_path
words, weights = [], {}
with path.open("r") as f:
reader = csv.DictReader(f)
for row in reader:
words.append(row["Word"])
                weights[row["Word"]] = float(row["Weight"])  # CSV values are read back as strings
self.words = words
self.weights = weights
def save_words(self):
with self.dict_path.open("w") as f:
            # Field names must match what load_words() reads back ("Word"/"Weight")
            writer = csv.DictWriter(f, fieldnames=("Word", "Weight"))
            writer.writeheader()
            for word, weight in zip(self.words, self.weights.values()):
                writer.writerow({"Word": word, "Weight": weight})
def quit(self):
self._highscores.append(
dict(
User=self.user,
Date=datetime.now().isoformat(),
Score=self.score.current_score,
)
)
self.save_highscores()
sys.exit()
if __name__ == "__main__":
    game = Game(Terminal(), user="Emma", dictionary=Path("wordlist.txt"))
    curses.wrapper(game.main)
``` |
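For reference, a minimal sketch of driving the module above. It assumes the `typing_game` package is importable and a curses-capable terminal is available; the file names `wordlist.txt` and `words.csv` are illustrative, and the CSV layout mirrors what `load_words`/`save_words` expect (`Word`/`Weight` columns).
```python
import csv
import curses
from pathlib import Path

from typing_game.game import Game
from typing_game.terminal import Terminal

# A plain word list (one word per line) is weighted by length via
# load_weight_by_length(); a .csv dictionary goes through load_words() instead.
Path("wordlist.txt").write_text("cat\nhouse\nkeyboard\n")

with open("words.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=("Word", "Weight"))
    writer.writeheader()
    writer.writerow({"Word": "cat", "Weight": 70})

game = Game(Terminal(), user="Emma", dictionary=Path("wordlist.txt"))
curses.wrapper(game.main)
```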
{
"source": "2e0byo/YADC",
"score": 2
} |
#### File: YADC/tests/conftest.py
```python
from pathlib import Path
import sys
import pytest
import shutil
sys.path.insert(0, str(Path(__file__).parent.parent))
from yadc.browser import Browser
def executable_path(*tries):
"""Find path to executable, or throw."""
path = None
for ex in tries:
path = shutil.which(ex)
if path:
break
if not path:
raise Exception(f"Unable to find path to {tries[0]}")
return path
@pytest.fixture
def chrome():
return executable_path(
"chrome",
"chromium",
"google-chrome-stable",
"chrome.exe",
r"C:\Program Files\Google\Chrome\Application\chrome.exe",
)
@pytest.fixture
def chromedriver():
return executable_path("chromedriver", "chromedriver.exe")
@pytest.fixture
def tor():
return executable_path("tor", "Tor/tor.exe")
@pytest.fixture
def browser(chrome, chromedriver):
return Browser(chrome=chrome, chromedriver=chromedriver)
```
#### File: YADC/yadc/cli.py
```python
def main():
print(
"""
YADC does not currently have a CLI.
Instead, you should write your own script setting up your environment properly.
See https://github.com/2e0byo/YADC/blob/master/main.py for a template to get
you started quickly.
Writing a CLI would not be too difficult: if you write run, please contribute
it back to the project!
"""
)
``` |
{
"source": "2e666f6f/pantilt-scanner",
"score": 4
} |
#### File: Code/python/Scanner.py
```python
from SerialDevice import SerialDevice
class Scanner:
def __init__(self, max_angle=170) -> None:
self.dev = SerialDevice()
self._ready = False
self.max_angle = max_angle
self.pan_angle = None
self.tilt_angle = None
print('syncing with scanner...')
self.center()
@property
def ready(self) -> bool:
return self._ready
def _read_until_ready(self) -> list:
'''
Reads data from the scanner until it is ready for a new instruction. Any data read is
returned as a list of lines.
Returns:
list: the lines of data received before 'ready'
'''
data = []
# loop if not ready or data waiting in input buffer
while not self._ready or self.dev.ser.in_waiting:
# read data from the input buffer, add it to the list if it isn't 'ready'
line = self.dev.read()
if line.strip() == 'ready':
self._ready = True
# don't add empty strings to the list
elif line.strip() != '':
data.append(line)
return data
def delay(self, time: int) -> None:
'''
Instructs the scanner to wait for a specific amount of time, then waits for a ready signal.
Intended to be used to mitigate the scanner shaking and messing up the readings.
Args:
time (int): The amount of time to wait in milliseconds.
'''
self.dev.write('DELAY|{}'.format(time))
self._ready = False
self._read_until_ready()
def pan(self, angle: int) -> None:
'''
Instructs the scanner to pan to an angle, then waits for a ready signal.
Args:
angle (int): the angle to pan to.
'''
# don't send a command if the angle is invalid
if angle >= 0 and angle <= self.max_angle:
self.dev.write('PAN|{}'.format(angle))
self._ready = False
self.pan_angle = angle # keep track of the new angle
self._read_until_ready()
def tilt(self, angle: int) -> None:
'''
Instructs the scanner to tilt to an angle, then waits for a ready signal.
Args:
angle (int): the angle to tilt to.
'''
# don't send a command if the angle is invalid
if angle >= 0 and angle <= self.max_angle:
self.dev.write('TILT|{}'.format(angle))
self._ready = False
self.tilt_angle = angle # keep track of the new angle
self._read_until_ready()
def read_sensor(self) -> tuple:
'''
Instructs the scanner to send a sensor reading then waits for the reading and a ready signal. The data is then
cleaned and returned as a tuple.
This function assumes the data is sent as 3 separate lines consisting of the axis and coordinate. For example,
a sensor reading of 445 at pan angle 10 tilt angle 35 should be sent as:
X10
Y35
Z445
Returns:
tuple: (int: pan angle, int: tilt angle, int: sensor reading)
'''
self.dev.write('READSENSOR') # send the instruction
self._ready = False
received = self._read_until_ready()
# format the received data according to the expected form shown in the method docstring
cleaned_data = [data.strip() for data in received if data.strip()[0] in ('X', 'Y', 'Z')]
        # strip the leading axis letter and convert to int, as promised in the docstring
        return tuple(int(val[1:]) for val in cleaned_data)
def center(self) -> None:
'''
Instructs the scanner to move to its center point, then checks the input buffer until it is empty and the
scanner is ready for instruction.
'''
        # use integer division so whole-degree angles are sent over serial
        self.pan_angle = self.max_angle // 2
        self.tilt_angle = self.max_angle // 2
self.pan(self.pan_angle)
self.tilt(self.tilt_angle)
self._read_until_ready()
``` |
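A short usage sketch for the class above, assuming the pan/tilt firmware is attached and `SerialDevice` can open its serial port; the 10-degree step and the output file name are illustrative.
```python
from Scanner import Scanner

scanner = Scanner(max_angle=170)  # blocks until the firmware reports "ready"
readings = []
for tilt in range(0, scanner.max_angle + 1, 10):
    scanner.tilt(tilt)
    for pan in range(0, scanner.max_angle + 1, 10):
        scanner.pan(pan)
        scanner.delay(200)  # let the mount settle before sampling
        readings.append(scanner.read_sensor())  # (pan, tilt, reading)

with open("scan.csv", "w") as f:
    f.write("pan,tilt,reading\n")
    for pan, tilt, reading in readings:
        f.write(f"{pan},{tilt},{reading}\n")
```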
{
"source": "2earaujo/pythonbirds",
"score": 4
} |
#### File: pythonbirds/oo/carro.py
```python
from motor import Motor
from direcao import Direcao
class Carro():
def __init__(self, arg_direcao, arg_motor):
self.motor = arg_motor
self.direcao = arg_direcao
def calcular_velocidade(self):
return self.motor.velocidade
def acelerar(self):
self.motor.acelerar()
def frear(self):
self.motor.frear()
def calcular_direcao(self):
return self.direcao.direcao
def girar_a_direita(self):
self.direcao.girar_a_direita()
def girar_a_esquerda(self):
self.direcao.girar_a_esquerda()
if __name__ == '__main__':
motor = Motor()
direcao = Direcao()
carro = Carro(direcao, motor)
print('Velocidade carro: ',carro.calcular_velocidade())
print('Acelera carro')
carro.acelerar()
print('Calcula velocidade: ',carro.calcular_velocidade())
print('Acelera carro')
carro.acelerar()
print('Velocidade atual: ',carro.calcular_velocidade())
carro.frear()
print(carro.calcular_velocidade())
print('Direcao atual: ',carro.calcular_direcao())
print('Giro a direita')
carro.girar_a_direita()
print('Direcao atual: ',carro.calcular_direcao())
print('Giro a direita')
carro.girar_a_direita()
print('Direcao atual: ',carro.calcular_direcao())
print('Giro a direita')
carro.girar_a_direita()
print('Direcao atual: ',carro.calcular_direcao())
print('Giro a direita')
carro.girar_a_direita()
print('Direcao atual: ',carro.calcular_direcao())
print('Giro a esquerda')
carro.girar_a_esquerda()
print('Direcao atual: ',carro.calcular_direcao())
print('Giro a esquerda')
carro.girar_a_esquerda()
print('Direcao atual: ',carro.calcular_direcao())
``` |
{
"source": "2easy/ctsp",
"score": 3
} |
#### File: 2easy/ctsp/greedy.py
```python
import helpers
class Greedy:
def __init__(self, dists):
self.dists = dists[:]
self.ncities = len(self.dists)
self.solution = []
self.cost = 0
def solve(self):
# generate all 3-cities tours
tours = helpers.all_3tours(range(1,len(self.dists)), self.dists)
# and sort them according to their length
tours.sort()
# choose best 3-tours
visited = set([])
for t in tours:
            if not set(t[1:]) & visited:
for c in t[1:]:
visited.add(c)
self.solution.append(t[1:])
# and then append the cities that hadn't been choosen
if len(self.dists) % 3 != 1:
all_cities = set(range(1, self.ncities)) # do NOT include base
not_visited = tuple(all_cities - visited)
self.solution.append(not_visited)
self.cost = helpers.compute_cost(self.solution, self.dists)
return self.solution[:]
``` |
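A usage sketch for the solver above, assuming the module is importable as `greedy` and that its `helpers` module (providing `all_3tours` and `compute_cost`) is on the path; index 0 is treated as the base, as the code implies, and the distance matrix is illustrative.
```python
from greedy import Greedy

# Symmetric distances for 7 cities; index 0 is the base and is excluded
# from the 3-city tours.
dists = [
    [0, 2, 9, 10, 7, 6, 3],
    [2, 0, 6, 4, 8, 5, 7],
    [9, 6, 0, 8, 5, 9, 4],
    [10, 4, 8, 0, 6, 7, 8],
    [7, 8, 5, 6, 0, 3, 9],
    [6, 5, 9, 7, 3, 0, 5],
    [3, 7, 4, 8, 9, 5, 0],
]

solver = Greedy(dists)
groups = solver.solve()   # disjoint groups of non-base cities, 3 per group here
print(groups, solver.cost)
```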
{
"source": "2easy/skyline",
"score": 2
} |
#### File: skyline/crucible/crucible_algorithms.py
```python
from __future__ import division
# @modified 20180910 - Task #2588: Update dependencies
# matplotlib.use is now required before statsmodels.api
from matplotlib import use as matplotlib_use
matplotlib_use('Agg')
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
# @modified 20160821 - Issue #23 Test dependency updates
# Use Agg for matplotlib==1.5.2 upgrade, backwards compatibile
# @modified 20180910 - Task #2588: Update dependencies
# import matplotlib
# matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
import traceback
import logging
import os
import time
from sys import version_info
from os.path import join
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
from settings import (
ALGORITHMS,
MIRAGE_ALGORITHMS,
PANDAS_VERSION,
)
skyline_app = 'crucible'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
python_version = int(version_info[0])
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
It must be defined required parameters (even if your algorithm/function does not
need them), as the run_algorithms function passes them to all ALGORITHMS defined
in settings.ALGORITHMS.
"""
def tail_avg(timeseries, end_timestamp, full_duration):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
def median_absolute_deviation(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
try:
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
except:
return None
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
if PANDAS_VERSION < '0.17.0':
try:
test_statistic = demedianed.iget(-1) / median_deviation
except:
return None
else:
try:
test_statistic = demedianed.iat[-1] / median_deviation
except:
return None
# Completely arbitary...triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
# As per https://github.com/etsy/skyline/pull/104 by @rugger74
# Although never seen this should return False if not > arbitary_value
# 20160523 @earthgecko
return False
def grubbs(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the Z score is greater than the Grubb's score.
"""
try:
series = scipy.array([x[1] for x in timeseries])
stdDev = scipy.std(series)
# Issue #27 - Handle z_score agent.py RuntimeWarning - https://github.com/earthgecko/skyline/issues/27
# This change avoids spewing warnings on agent.py tests:
# RuntimeWarning: invalid value encountered in double_scalars
# If stdDev is 0 division returns nan which is not > grubbs_score so
# return False here
if stdDev == 0:
return False
mean = np.mean(series)
tail_average = tail_avg(timeseries, end_timestamp, full_duration)
z_score = (tail_average - mean) / stdDev
len_series = len(series)
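        # The next three lines build the two-sided Grubbs critical value at
        # alpha = 0.05, G = ((N - 1) / sqrt(N)) * sqrt(t^2 / (N - 2 + t^2)),
        # where t is the critical value of the t-distribution with N - 2
        # degrees of freedom; the tail average is anomalous when its z-score
        # exceeds G.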
threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
threshold_squared = threshold * threshold
grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
return z_score > grubbs_score
except:
return None
def first_hour_average(timeseries, end_timestamp, full_duration):
"""
Calcuate the simple average over 60 datapoints (maybe one hour),
FULL_DURATION seconds ago.
A timeseries is anomalous if the average of the last three datapoints
are outside of three standard deviations of this value.
"""
try:
int_end_timestamp = int(timeseries[-1][0])
int_start_timestamp = int(timeseries[0][0])
int_full_duration = int_end_timestamp - int_start_timestamp
# Determine data resolution
# last_hour_threshold = int_end_timestamp - (int_full_duration - 3600)
int_second_last_end_timestamp = int(timeseries[-2][0])
resolution = int_end_timestamp - int_second_last_end_timestamp
# @modified 20160814 - pyflaked
# ten_data_point_seconds = resolution * 10
sixty_data_point_seconds = resolution * 60
sixty_datapoints_ago = int_end_timestamp - sixty_data_point_seconds
last_hour_threshold = int_end_timestamp - (int_full_duration - sixty_datapoints_ago)
series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries, end_timestamp, full_duration)
return abs(t - mean) > 3 * stdDev
except:
return None
return False
def stddev_from_average(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the absolute value of the average of the latest
three datapoint minus the moving average is greater than one standard
deviation of the average. This does not exponentially weight the MA and so
is better for detecting anomalies with respect to the entire series.
"""
try:
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries, end_timestamp, full_duration)
return abs(t - mean) > 3 * stdDev
except:
return None
return False
def stddev_from_moving_average(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the absolute value of the average of the latest
three datapoint minus the moving average is greater than three standard
deviations of the moving average. This is better for finding anomalies with
respect to the short term trends.
"""
try:
series = pandas.Series([x[1] for x in timeseries])
if PANDAS_VERSION < '0.18.0':
expAverage = pandas.stats.moments.ewma(series, com=50)
stdDev = pandas.stats.moments.ewmstd(series, com=50)
else:
expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()
stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)
if PANDAS_VERSION < '0.17.0':
return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)
else:
return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]
# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat
except:
return None
return False
def mean_subtraction_cumulation(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the value of the next datapoint in the
series is farther than three standard deviations out in cumulative terms
after subtracting the mean from each data point.
"""
try:
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
# @modified 20160814 - pyflaked
# if PANDAS_VERSION < '0.18.0':
# expAverage = pandas.stats.moments.ewma(series, com=15)
# else:
# expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()
if PANDAS_VERSION < '0.17.0':
return abs(series.iget(-1)) > 3 * stdDev
else:
return abs(series.iat[-1]) > 3 * stdDev
except:
return None
return False
def least_squares(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the average of the last three datapoints
on a projected least squares model is greater than three sigma.
"""
try:
x = np.array([t[0] for t in timeseries])
y = np.array([t[1] for t in timeseries])
A = np.vstack([x, np.ones(len(x))]).T
# @modified 20160814 - pyflaked
# results = np.linalg.lstsq(A, y)
# residual = results[1]
m, c = np.linalg.lstsq(A, y)[0]
errors = []
# Evaluate append once, not every time in the loop - this gains ~0.020 s on
# every timeseries potentially
append_error = errors.append
for i, value in enumerate(y):
projected = m * x[i] + c
error = value - projected
# errors.append(error)
append_error(error)
if len(errors) < 3:
return False
std_dev = scipy.std(errors)
t = (errors[-1] + errors[-2] + errors[-3]) / 3
return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
except:
return None
return False
def histogram_bins(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the average of the last three datapoints falls
into a histogram bin with less than 20 other datapoints (you'll need to tweak
that number depending on your data)
Returns: the size of the bin which contains the tail_avg. Smaller bin size
means more anomalous.
"""
try:
int_end_timestamp = int(timeseries[-1][0])
int_start_timestamp = int(timeseries[0][0])
int_full_duration = int_end_timestamp - int_start_timestamp
series = scipy.array([x[1] for x in timeseries])
t = tail_avg(timeseries, int_end_timestamp, int_full_duration)
h = np.histogram(series, bins=15)
bins = h[1]
for index, bin_size in enumerate(h[0]):
if bin_size <= 20:
# Is it in the first bin?
if index == 0:
if t <= bins[0]:
return True
# Is it in the current bin?
elif t >= bins[index] and t < bins[index + 1]:
return True
except:
return None
return False
def ks_test(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if 2 sample Kolmogorov-Smirnov test indicates
that data distribution for last 10 datapoints (might be 10 minutes) is
different from the last 60 datapoints (might be an hour).
It produces false positives on non-stationary series so Augmented
Dickey-Fuller test applied to check for stationarity.
"""
try:
int_end_timestamp = int(timeseries[-1][0])
# @modified 20160814 - pyflaked
# hour_ago = int_end_timestamp - 3600
# ten_minutes_ago = int_end_timestamp - 600
# Determine resolution of the data set
# reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
# probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
int_second_last_end_timestamp = int(timeseries[-2][0])
resolution = int_end_timestamp - int_second_last_end_timestamp
ten_data_point_seconds = resolution * 10
ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds
sixty_data_point_seconds = resolution * 60
sixty_datapoints_ago = int_end_timestamp - sixty_data_point_seconds
reference = scipy.array([x[1] for x in timeseries if x[0] >= sixty_datapoints_ago and x[0] < ten_datapoints_ago])
probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_datapoints_ago])
if reference.size < 20 or probe.size < 20:
return False
ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
if ks_p_value < 0.05 and ks_d > 0.5:
adf = sm.tsa.stattools.adfuller(reference, 10)
if adf[1] < 0.05:
return True
except:
return None
return False
def detect_drop_off_cliff(timeseries, end_timestamp, full_duration):
"""
A timeseries is anomalous if the average of the last ten datapoints is <trigger>
times greater than the last data point. This algorithm is most suited to
timeseries with most datapoints being > 100 (e.g high rate). The arbitrary
<trigger> values become more noisy with lower value datapoints, but it still
matches drops off cliffs.
"""
try:
if len(timeseries) < 21:
return False
int_end_timestamp = int(timeseries[-1][0])
# Determine resolution of the data set
int_second_last_end_timestamp = int(timeseries[-2][0])
resolution = int_end_timestamp - int_second_last_end_timestamp
ten_data_point_seconds = resolution * 10
ten_datapoints_ago = int_end_timestamp - ten_data_point_seconds
ten_datapoint_array = scipy.array([x[1] for x in timeseries if x[0] <= int_end_timestamp and x[0] > ten_datapoints_ago])
ten_datapoint_array_len = len(ten_datapoint_array)
if ten_datapoint_array_len > 3:
# DO NOT handle if negative integers in range, where is the bottom of
# of the cliff if a range goes negative? The maths does not work either
ten_datapoint_min_value = np.amin(ten_datapoint_array)
if ten_datapoint_min_value < 0:
return False
ten_datapoint_max_value = np.amax(ten_datapoint_array)
if ten_datapoint_max_value < 10:
return False
ten_datapoint_array_sum = np.sum(ten_datapoint_array)
ten_datapoint_value = int(ten_datapoint_array[-1])
ten_datapoint_average = ten_datapoint_array_sum / ten_datapoint_array_len
ten_datapoint_value = int(ten_datapoint_array[-1])
ten_datapoint_max_value = np.amax(ten_datapoint_array)
if ten_datapoint_max_value == 0:
return False
if ten_datapoint_max_value < 101:
trigger = 15
if ten_datapoint_max_value < 20:
trigger = ten_datapoint_average / 2
if ten_datapoint_max_value < 1:
trigger = 0.1
if ten_datapoint_max_value > 100:
trigger = 100
if ten_datapoint_value == 0:
# Cannot divide by 0, so set to 0.1 to prevent error
ten_datapoint_value = 0.1
if ten_datapoint_value == 1:
trigger = 1
if ten_datapoint_value == 1 and ten_datapoint_max_value < 10:
trigger = 0.1
if ten_datapoint_value == 0.1 and ten_datapoint_average < 1 and ten_datapoint_array_sum < 7:
trigger = 7
# Filter low rate and variable between 0 and 100 metrics
if ten_datapoint_value <= 1 and ten_datapoint_array_sum < 100 and ten_datapoint_array_sum > 1:
all_datapoints_array = scipy.array([x[1] for x in timeseries])
all_datapoints_max_value = np.amax(all_datapoints_array)
if all_datapoints_max_value < 100:
# print "max_value for all datapoints at - " + str(int_end_timestamp) + " - " + str(all_datapoints_max_value)
return False
ten_datapoint_result = ten_datapoint_average / ten_datapoint_value
if int(ten_datapoint_result) > trigger:
return True
except:
return None
return False
"""
This is no longer no man's land, but feel free to play and try new stuff
"""
def run_algorithms(
timeseries, timeseries_name, end_timestamp, full_duration,
timeseries_file, skyline_app, algorithms):
"""
Iteratively run algorithms.
"""
results_dir = os.path.dirname(timeseries_file)
if not os.path.exists(results_dir):
os.makedirs(results_dir, mode=0o755)
start_analysis = int(time.time())
triggered_algorithms = []
anomalous = False
if str(algorithms) == "['all']":
if skyline_app == 'analyzer':
check_algorithms = ALGORITHMS
if skyline_app == 'mirage':
check_algorithms = MIRAGE_ALGORITHMS
if skyline_app == 'boundary':
check_algorithms = algorithms
if skyline_app == 'crucible':
            # list.append returns None, so build a new list instead
            check_algorithms = ALGORITHMS + ['detect_drop_off_cliff']
else:
check_algorithms = algorithms
logger.info('checking algorithms - %s' % (str(check_algorithms)))
for algorithm in check_algorithms:
detected = ''
try:
x_vals = np.arange(len(timeseries))
y_vals = np.array([y[1] for y in timeseries])
# Match default graphite graph size
plt.figure(figsize=(5.86, 3.08), dpi=100)
plt.plot(x_vals, y_vals)
# Start a couple datapoints in for the tail average
for index in range(10, len(timeseries)):
sliced = timeseries[:index]
anomaly = globals()[algorithm](sliced, end_timestamp, full_duration)
# Point out the datapoint if it's anomalous
if anomaly:
plt.plot([index], [sliced[-1][1]], 'ro')
detected = "DETECTED"
if detected == "DETECTED":
results_filename = join(results_dir + "/" + algorithm + "." + detected + ".png")
# logger.info('ANOMALY DETECTED :: %s' % (algorithm))
anomalous = True
triggered_algorithms.append(algorithm)
else:
results_filename = join(results_dir + "/" + algorithm + ".png")
plt.savefig(results_filename, dpi=100)
# logger.info('%s :: %s' % (algorithm, results_filename))
if python_version == 2:
                # 0o644 is valid octal in both Python 2.6+ and Python 3
                os.chmod(results_filename, 0o644)
if python_version == 3:
os.chmod(results_filename, mode=0o644)
except:
logger.error('error :: %s' % (traceback.format_exc()))
logger.info('info :: error thrown in algorithm running and plotting - %s' % (str(algorithm)))
end_analysis = int(time.time())
# @modified 20160814 - pyflaked
# seconds_to_run = end_analysis - start_analysis
# logger.info(
# 'analysis of %s at a full duration of %s took %s seconds' %
# (timeseries_name, str(full_duration), str(seconds_to_run)))
return anomalous, triggered_algorithms
```
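A rough sketch of driving `run_algorithms` above directly, assuming a Skyline checkout whose `settings.py` is importable; the synthetic metric, temporary path, and algorithm selection are illustrative.
```python
import time
from crucible_algorithms import run_algorithms

now = int(time.time())
# Ten hours of flat one-minute data with an obvious spike at the end.
timeseries = [(now - (600 - i) * 60, 10.0) for i in range(600)]
timeseries[-1] = (now, 500.0)

anomalous, triggered = run_algorithms(
    timeseries, 'test.metric', now, 600 * 60,
    '/tmp/crucible_test/test.metric.json', 'crucible',
    ['grubbs', 'median_absolute_deviation'])
print(anomalous, triggered)  # plots are written next to the timeseries file
```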
#### File: skyline/ionosphere/layers.py
```python
from __future__ import division
import logging
import os
from time import time
import operator
import re
from sys import version_info
import traceback
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy.sql import select
import numpy as np
import scipy
# @added 20180828 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
import math
import settings
from database import (
get_engine, ionosphere_layers_table_meta, layers_algorithms_table_meta,
ionosphere_layers_matched_table_meta)
skyline_app = 'ionosphere'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
python_version = int(version_info[0])
this_host = str(os.uname()[1])
# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
ENABLE_IONOSPHERE_DEBUG = settings.ENABLE_IONOSPHERE_DEBUG
except:
logger.error('error :: layers :: cannot determine ENABLE_IONOSPHERE_DEBUG from settings' % skyline_app)
ENABLE_IONOSPHERE_DEBUG = False
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
try:
learn_full_duration = int(settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS) * 86400
except:
learn_full_duration = 86400 * 30 # 2592000
context = 'ionosphere_layers'
def run_layer_algorithms(base_name, layers_id, timeseries, layers_count, layers_checked):
"""
Called by :class:`~skyline.skyline.Ionosphere.spin_process` to
evaluate anomalies against a custom layers boundary algorithm.
:param metric: the metric base_name
:param layers_id: the layer id
:param timeseries: the time series list
:param layers_count: the number of layers for the metric
:param layers_checked: the number of layers that have been checked
:type metric: str
:type layer_id: int
:type timeseries: list
:type layers_count: int
:type layers_checked: int
:return: True or False
:rtype: boolean
"""
logger = logging.getLogger(skyline_app_logger)
logger.info('layers :: layers_id - %s' % str(layers_id))
def layers_get_an_engine():
try:
engine, fail_msg, trace = get_engine(skyline_app)
return engine, fail_msg, trace
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: layers :: get_an_engine :: failed to get MySQL engine'
logger.error('%s' % fail_msg)
return None, fail_msg, trace
def layers_engine_disposal(engine):
try:
if engine:
try:
engine.dispose()
logger.info('layers :: MySQL engine disposed of')
return True
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
else:
logger.info('layers :: no MySQL engine to dispose of')
return True
except:
return False
return False
engine = None
try:
engine, log_msg, trace = layers_get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for layers_algorithms for layers_id %s - %s' % (str(layers_id), base_name))
return False
if not engine:
logger.error('error :: engine not obtained for layers_algorithms_table for layers_id %s - %s' % (str(layers_id), base_name))
return False
try:
layers_algorithms_table, log_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('layers_algorithms_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get layers_algorithms_table meta for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
layers_algorithms_result = None
try:
connection = engine.connect()
stmt = select([layers_algorithms_table]).where(layers_algorithms_table.c.layer_id == int(layers_id))
layers_algorithms_result = connection.execute(stmt)
connection.close()
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used
# layer_algorithms_details_object = layers_algorithms_result
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get layers_algorithms for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
layer_active = True
es_layer = False
f1_layer = False
f2_layer = False
# @added 20170616 - Feature #2048: D1 ionosphere layer
d1_layer = False
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
for row in layers_algorithms_result:
current_fp_id = row['fp_id']
current_metric_id = row['metric_id']
layer = row['layer']
if layer == 'D':
d_condition = row['condition']
d_boundary_limit = float(row['layer_boundary'])
# @added 20170616 - Feature #2048: D1 ionosphere layer
if layer == 'D1':
d1_condition = row['condition']
if str(d1_condition) != 'none':
d1_boundary_limit = float(row['layer_boundary'])
d1_boundary_times = row['times_in_row']
d1_layer = layer_active
if layer == 'E':
e_condition = row['condition']
e_boundary_limit = float(row['layer_boundary'])
e_boundary_times = row['times_in_row']
if layer == 'Es':
es_condition = row['condition']
es_day = row['layer_boundary']
es_layer = layer_active
if layer == 'F1':
f1_from_time = row['layer_boundary']
f1_layer = layer_active
if layer == 'F2':
f2_until_time = row['layer_boundary']
f2_layer = layer_active
except:
logger.error(traceback.format_exc())
logger.error('error :: failed iterate layers_algorithms_result for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
# Update ionosphere_layers checked_count
checked_timestamp = int(time())
try:
ionosphere_layers_table, log_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_layers_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_layers_table meta for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
try:
connection = engine.connect()
connection.execute(
ionosphere_layers_table.update(
ionosphere_layers_table.c.id == layers_id).
values(check_count=ionosphere_layers_table.c.check_count + 1,
last_checked=checked_timestamp))
connection.close()
logger.info('updated check_count for %s' % str(layers_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update check_count and last_checked for %s ' % str(layers_id))
if engine:
layers_engine_disposal(engine)
return False
not_anomalous = False
autoaggregate = False
autoaggregate_value = 0
# Determine if the namespace is to be aggregated using the Boundary settings
if settings.BOUNDARY_AUTOAGGRERATION:
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
for autoaggregate_metric in settings.BOUNDARY_AUTOAGGRERATION_METRICS:
autoaggregate = False
autoaggregate_value = 0
CHECK_MATCH_PATTERN = autoaggregate_metric[0]
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(base_name)
if pattern_match:
autoaggregate = True
autoaggregate_value = autoaggregate_metric[1]
break
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine Boundary autoaggregation settings for %s ' % str(layers_id))
if engine:
layers_engine_disposal(engine)
return False
try:
int_end_timestamp = int(timeseries[-1][0])
last_hour = int_end_timestamp - 3600
last_timestamp = int_end_timestamp
start_timestamp = last_hour
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timeseries variables for %s ' % str(layers_id))
if engine:
layers_engine_disposal(engine)
return False
use_timeseries = timeseries
if autoaggregate:
logger.info('layers :: aggregating timeseries at %s seconds' % str(autoaggregate_value))
aggregated_timeseries = []
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
next_timestamp = last_timestamp - int(autoaggregate_value)
logger.info('layers :: aggregating from %s to %s' % (str(start_timestamp), str(int_end_timestamp)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timeseries variables for autoaggregation for %s ' % str(layers_id))
if engine:
layers_engine_disposal(engine)
return False
valid_timestamps = False
try:
valid_timeseries = int_end_timestamp - start_timestamp
if valid_timeseries == 3600:
valid_timestamps = True
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: aggregating error - not valid_timeseries for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
if valid_timestamps:
try:
# Check sane variables otherwise we can just hang here in a while loop
while int(next_timestamp) > int(start_timestamp):
value = np.sum(scipy.array([int(x[1]) for x in timeseries if x[0] <= last_timestamp and x[0] > next_timestamp]))
aggregated_timeseries += ((last_timestamp, value),)
last_timestamp = next_timestamp
next_timestamp = last_timestamp - autoaggregate_value
aggregated_timeseries.reverse()
use_timeseries = aggregated_timeseries
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: error creating aggregated_timeseries for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
timeseries = use_timeseries
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
last_datapoint = timeseries[-1][1]
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: invalid timeseries for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
try:
int_end_timestamp = int(timeseries[-1][0])
last_hour = int_end_timestamp - 3600
last_timestamp = int_end_timestamp
start_timestamp = last_hour
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timeseries variables from the use_timeseries for %s ' % str(layers_id))
if engine:
layers_engine_disposal(engine)
return False
# Thanks to <NAME> http://stackoverflow.com/users/47773/matthew-flaschen
# for his operator op_func pattern at http://stackoverflow.com/a/2983144, it
# it is a funky pattern :)
ops = {'<': operator.le,
'>': operator.ge,
'==': operator.eq,
'!=': operator.ne,
'<=': operator.le,
'>=': operator.ge}
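    # Example: ops['>='](last_datapoint, d_boundary_limit) evaluates
    # last_datapoint >= d_boundary_limit; note that the strict '<' and '>'
    # conditions are mapped to the inclusive operator.le/operator.ge above.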
# @added 20180919 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# Record in the database
d_approximately_close = False
e_approximately_close = False
# @added 20180828 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
try:
use_approximately_close = settings.IONOSPHERE_LAYERS_USE_APPROXIMATELY_CLOSE
except:
use_approximately_close = False
d_log_string = 'matches'
e_log_string = 'matches'
if use_approximately_close:
original_d_boundary_limit = d_boundary_limit
original_e_boundary_limit = e_boundary_limit
d_boundary_percent_tolerance = False
e_boundary_percent_tolerance = False
if d_condition == '>' or d_condition == '>=':
# Do not use approximately_close on values less than 10
if d_boundary_limit <= 10:
d_boundary_percent_tolerance = False
logger.info(
'layers :: no approximately_close tolerance added to D layer boundary limit of %s as < 10' % (
str(original_d_boundary_limit)))
if d_boundary_limit >= 11 and d_boundary_limit < 30:
d_boundary_percent_tolerance = 10
if d_boundary_limit >= 30:
d_boundary_percent_tolerance = 5
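            # Worked example: a D boundary of 40 falls in the 5% band, so the
            # tolerance is ceil(40 * 0.05) = 2 and the effective limit becomes
            # 42; a boundary of 20 falls in the 10% band, giving ceil(2.0) = 2
            # and an effective limit of 22.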
if d_boundary_percent_tolerance:
try:
d_boundary_limit_tolerance = int(math.ceil((d_boundary_limit / 100.0) * d_boundary_percent_tolerance))
d_boundary_limit = d_boundary_limit + d_boundary_limit_tolerance
logger.info(
'layers :: added a tolerance of %s to D layer boundary limit of %s, d_boundary_limit now %s' % (
str(d_boundary_limit_tolerance),
str(original_d_boundary_limit),
str(d_boundary_limit)))
d_log_string = 'matches (approximately_close)'
# @added 20180919 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
d_approximately_close = True
except:
d_boundary_limit = original_d_boundary_limit
if e_condition == '<' or e_condition == '<=':
e_boundary_limit_tolerance = False
e_boundary_percent_tolerance = False
# Do not use approximately_close on values less than 10
if e_boundary_limit <= 10:
e_boundary_limit_tolerance = False
logger.info(
'layers :: no approximately_close tolerance added to E layer boundary limit of %s as < 10' % (
str(original_e_boundary_limit)))
if e_boundary_limit >= 11 and e_boundary_limit < 30:
e_boundary_percent_tolerance = 10
if e_boundary_limit >= 30:
e_boundary_percent_tolerance = 5
if e_boundary_percent_tolerance:
try:
e_boundary_limit_tolerance = int(math.ceil((e_boundary_limit / 100.0) * e_boundary_percent_tolerance))
e_boundary_limit = e_boundary_limit - e_boundary_limit_tolerance
logger.info(
'layers :: subtracted a tolerance of %s to E layer boundary limit of %s, e_boundary_limit now %s' % (
str(e_boundary_limit_tolerance),
str(original_e_boundary_limit),
str(e_boundary_limit)))
e_log_string = 'matches (approximately_close)'
# @added 20180919 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
e_approximately_close = True
except:
e_boundary_limit = original_e_boundary_limit
# D layer
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
op_func = ops[d_condition]
op_func_result = op_func(last_datapoint, d_boundary_limit)
if op_func_result:
if engine:
layers_engine_disposal(engine)
# @modified 20180828 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# logger.info(
# 'layers :: discarding as the last value %s in the timeseries matches D layer boundary %s %s' % (
# str(last_datapoint), str(d_condition),
# str(d_boundary_limit)))
logger.info(
'layers :: discarding as the last value %s in the time series %s D layer boundary %s %s' % (
str(last_datapoint), d_log_string, str(d_condition),
str(d_boundary_limit)))
return False
else:
# @added 20181014 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
logger.info(
'layers :: the last value %s in the time series does not breach D layer boundary of %s %s' % (
str(last_datapoint), str(d_condition), str(d_boundary_limit)))
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: invalid D layer op_func for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
# @added 20170616 - Feature #2048: D1 ionosphere layer
if d1_layer:
try:
op_func = ops[d1_condition]
count = 0
while count < d1_boundary_times:
count += 1
if count == 1:
understandable_message_str = 'the last and latest value in the timeseries'
if count == 2:
understandable_message_str = 'the 2nd last value in the timeseries'
if count == 3:
understandable_message_str = 'the 3rd last value in the timeseries'
if count >= 4:
understandable_message_str = 'the %sth last value in the timeseries' % str(count)
value = float(timeseries[-count][1])
# @modified 20171106 - Bug #2208: D1 layer issue
# op_func_result = op_func(value, e_boundary_limit)
op_func_result = op_func(value, d1_boundary_limit)
if op_func_result:
if engine:
layers_engine_disposal(engine)
logger.info('layers :: %s was %s and breaches the D1 layer boundary of %s %s' % (
str(understandable_message_str), str(value),
str(d1_condition), str(d1_boundary_limit)))
return False
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: invalid D1 layer op_func for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
# E layer
# @modified 20170314 - Feature #1960: ionosphere_layers
# Changed condition so the correct method to not "unset"
# e_layer_matched = True
e_layer_matched = False
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
op_func = ops[e_condition]
count = 0
while count < e_boundary_times:
count += 1
if count == 1:
understandable_message_str = 'the last and latest value in the timeseries'
if count == 2:
understandable_message_str = 'the 2nd last value in the timeseries'
if count == 3:
understandable_message_str = 'the 3rd last value in the timeseries'
if count >= 4:
understandable_message_str = 'the %sth last value in the timeseries' % str(count)
value = float(timeseries[-count][1])
op_func_result = op_func(value, e_boundary_limit)
if not op_func_result:
logger.info('layers :: %s was %s and breaches the E layer boundary of %s %s' % (
str(understandable_message_str), str(value),
str(e_condition), str(e_boundary_limit)))
# @modified 20170314 - Feature #1960: ionosphere_layers
# Do not override the condition
# e_layer_matched = False
else:
e_layer_matched = True
# @modified 20180828 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# logger.info('layers :: %s was %s and matches the E layer boundary of %s as not anomalous' % (
# str(understandable_message_str), str(value),
# str(e_boundary_limit)))
logger.info('layers :: %s was %s and %s the E layer boundary of %s as not anomalous' % (
str(understandable_message_str), str(value), e_log_string,
str(e_boundary_limit)))
break
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: invalid E layer op_func for layers_id %s - %s' % (str(layers_id), base_name))
if engine:
layers_engine_disposal(engine)
return False
if es_layer:
logger.info('layers :: Es layer not implemented yet - cannot evaluate es_day %s and es_condition %s' % (str(es_day), str(es_condition)))
if f1_layer:
logger.info('layers :: F1 layer not implemented yet - cannot evaluate f1_from_time %s' % str(f1_from_time))
if f2_layer:
logger.info('layers :: F2 layer not implemented yet - cannot evaluate f2_until_time %s' % str(f2_until_time))
if not e_layer_matched:
if engine:
layers_engine_disposal(engine)
logger.info('layers :: returning False not_anomalous breached E layer')
return False
else:
not_anomalous = True
if not_anomalous:
try:
connection = engine.connect()
connection.execute(
ionosphere_layers_table.update(
ionosphere_layers_table.c.id == layers_id).
values(matched_count=ionosphere_layers_table.c.matched_count + 1,
last_matched=checked_timestamp))
connection.close()
logger.info('layers :: updated matched_count for %s' % str(layers_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: could not update matched_count and last_matched for %s ' % str(layers_id))
if engine:
layers_engine_disposal(engine)
return False
try:
ionosphere_layers_matched_table, log_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('layers :: ionosphere_layers_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: failed to get ionosphere_layers_matched_table meta for %s' % base_name)
if engine:
layers_engine_disposal(engine)
return False
# @added 20180919 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
approx_close = 0
if d_approximately_close or e_approximately_close:
approx_close = 1
# @added 20181013 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# In order to correctly label whether to match is an approximately_close
# match or not, the values need to be reassessed here using the original
# boundary limits, otherwise all matches are labelled as approx_close
# if approximately_close is enabled.
if use_approximately_close and approx_close:
original_d_boundary_limit_matched = False
original_e_boundary_limit_matched = False
if d_approximately_close:
if d_condition == '>' or d_condition == '>=':
try:
op_func = ops[d_condition]
op_func_result = op_func(last_datapoint, original_d_boundary_limit)
if op_func_result:
logger.info(
'layers :: the original D boundary limit of %s would have been breached if the approximately_close tolerance was not added' % (
str(original_d_boundary_limit)))
else:
logger.info(
'layers :: the original D boundary limit of %s would have passed without the approximately_close tolerance added' % (
str(original_d_boundary_limit)))
original_d_boundary_limit_matched = True
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: invalid original_d_boundary_limit D layer op_func check for layers_id %s - %s' % (str(layers_id), base_name))
if e_approximately_close:
try:
op_func = ops[e_condition]
count = 0
while count < e_boundary_times:
count += 1
if count == 1:
understandable_message_str = 'the last and latest value in the timeseries'
if count == 2:
understandable_message_str = 'the 2nd last value in the timeseries'
if count == 3:
understandable_message_str = 'the 3rd last value in the timeseries'
if count >= 4:
understandable_message_str = 'the %sth last value in the timeseries' % str(count)
value = float(timeseries[-count][1])
op_func_result = op_func(value, original_e_boundary_limit)
if op_func_result:
original_e_boundary_limit_matched = True
logger.info('layers :: %s was %s and the original E layer boundary of %s matches as not anomalous' % (
str(understandable_message_str), str(value),
str(original_e_boundary_limit)))
break
except:
logger.error(traceback.format_exc())
logger.error('error :: layers :: invalid original_e_boundary_limit E layer op_func check for layers_id %s - %s' % (str(layers_id), base_name))
if original_d_boundary_limit_matched or original_e_boundary_limit_matched:
approx_close = 0
logger.info('layers :: approximately_close values were not needed to obtain a match, not labelling approx_close')
else:
approx_close = 1
logger.info('layers :: approximately_close values were needed to obtain a match, labelling approx_close')
try:
connection = engine.connect()
ins = ionosphere_layers_matched_table.insert().values(
layer_id=int(layers_id),
fp_id=int(current_fp_id),
metric_id=int(current_metric_id),
anomaly_timestamp=int(last_timestamp),
anomalous_datapoint=float(last_datapoint),
full_duration=int(settings.FULL_DURATION),
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
layers_count=layers_count, layers_checked=layers_checked,
# @added 20180919 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
approx_close=approx_close)
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
logger.info('layers :: new ionosphere_layers_matched id: %s' % str(new_matched_id))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: layers :: could not update ionosphere_layers_matched for %s with with timestamp %s' % (
str(layers_id), str(last_timestamp)))
if engine:
layers_engine_disposal(engine)
return False
# @added 20170306 - Feature #1964: ionosphere_layers - namespace_matches
# to be considered
if engine:
layers_engine_disposal(engine)
return not_anomalous
```
#### File: skyline/mirage/mirage_algorithms.py
```python
from __future__ import division
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import traceback
import logging
from time import time
import os.path
import sys
from os import getpid
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
from settings import (
MIRAGE_ALGORITHMS,
MIRAGE_CONSENSUS,
MIRAGE_DATA_FOLDER,
MIRAGE_ENABLE_SECOND_ORDER,
PANDAS_VERSION,
RUN_OPTIMIZED_WORKFLOW,
SKYLINE_TMP_DIR,
REDIS_SOCKET_PATH,
REDIS_PASSWORD,
)
from algorithm_exceptions import *
skyline_app = 'mirage'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
# @added 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if MIRAGE_ENABLE_SECOND_ORDER:
from redis import StrictRedis
from msgpack import unpackb, packb
if REDIS_PASSWORD:
redis_conn = StrictRedis(password=REDIS_PASSWORD, unix_socket_path=REDIS_SOCKET_PATH)
else:
redis_conn = StrictRedis(unix_socket_path=REDIS_SOCKET_PATH)
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input timeseries is
anomalous or not.
The key here is to return a True or False boolean.
You should use the pythonic except mechanism to ensure any excpetions do not
cause things to halt and the record_algorithm_error utility can be used to
sample any algorithm errors to log.
To add an algorithm, define it here, and add its name to settings.MIRAGE_ALGORITHMS.
"""
def tail_avg(timeseries, second_order_resolution_seconds):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
def median_absolute_deviation(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
try:
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
if PANDAS_VERSION < '0.17.0':
try:
test_statistic = demedianed.iget(-1) / median_deviation
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
else:
try:
test_statistic = demedianed.iat[-1] / median_deviation
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
# Completely arbitary...triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
else:
return False
def grubbs(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the Z score is greater than the Grubb's score.
"""
try:
series = scipy.array([x[1] for x in timeseries])
stdDev = scipy.std(series)
# Issue #27 - Handle z_score agent.py RuntimeWarning - https://github.com/earthgecko/skyline/issues/27
# This change avoids spewing warnings on agent.py tests:
# RuntimeWarning: invalid value encountered in double_scalars
# If stdDev is 0 division returns nan which is not > grubbs_score so
# return False here
if stdDev == 0:
return False
mean = np.mean(series)
tail_average = tail_avg(timeseries, second_order_resolution_seconds)
z_score = (tail_average - mean) / stdDev
len_series = len(series)
threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
threshold_squared = threshold * threshold
grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
return z_score > grubbs_score
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def first_hour_average(timeseries, second_order_resolution_seconds):
"""
Calculate the simple average over the hour beginning second_order_resolution_seconds ago.
A timeseries is anomalous if the average of the last three datapoints
is outside of three standard deviations of this value.
"""
try:
last_hour_threshold = time() - (second_order_resolution_seconds - 3600)
series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries, second_order_resolution_seconds)
return abs(t - mean) > 3 * stdDev
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def stddev_from_average(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the absolute value of the average of the latest
three datapoints minus the mean of the whole series is greater than three
standard deviations of the series. This does not exponentially weight the
average and so is better for detecting anomalies with respect to the entire series.
"""
try:
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries, second_order_resolution_seconds)
return abs(t - mean) > 3 * stdDev
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def stddev_from_moving_average(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the absolute value of the latest datapoint minus
the exponentially weighted moving average is greater than three standard
deviations of the moving average. This is better for finding anomalies with
respect to short term trends.
"""
try:
series = pandas.Series([x[1] for x in timeseries])
if PANDAS_VERSION < '0.18.0':
expAverage = pandas.stats.moments.ewma(series, com=50)
stdDev = pandas.stats.moments.ewmstd(series, com=50)
else:
expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).mean()
stdDev = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=50).std(bias=False)
if PANDAS_VERSION < '0.17.0':
return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)
else:
return abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1]
# http://stackoverflow.com/questions/28757389/loc-vs-iloc-vs-ix-vs-at-vs-iat
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def mean_subtraction_cumulation(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the value of the next datapoint in the
series is farther than three standard deviations out in cumulative terms
after subtracting the mean from each data point.
"""
try:
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
# @modified 20180910 - Task #2588: Update dependencies
# This expAverage is unused
# if PANDAS_VERSION < '0.18.0':
# expAverage = pandas.stats.moments.ewma(series, com=15)
# else:
# expAverage = pandas.Series.ewm(series, ignore_na=False, min_periods=0, adjust=True, com=15).mean()
if PANDAS_VERSION < '0.17.0':
return abs(series.iget(-1)) > 3 * stdDev
else:
return abs(series.iat[-1]) > 3 * stdDev
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def least_squares(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the average of the last three datapoints
on a projected least squares model is greater than three sigma.
"""
try:
x = np.array([t[0] for t in timeseries])
y = np.array([t[1] for t in timeseries])
A = np.vstack([x, np.ones(len(x))]).T
# @modified 20180910 - Task #2588: Update dependencies
# This results and residual are unused
# results = np.linalg.lstsq(A, y)
# residual = results[1]
# @modified 20180910 - Task #2588: Update dependencies
# Changed in version numpy 1.14.0 - see full comments in
# analyzer/algorithms.py under least_squares np.linalg.lstsq
# m, c = np.linalg.lstsq(A, y)[0]
m, c = np.linalg.lstsq(A, y, rcond=-1)[0]
errors = []
# Evaluate append once, not every time in the loop - this gains ~0.020 s
# on every timeseries potentially @earthgecko #1310
append_error = errors.append
# Further, a question exists related to performance and accuracy with
# regards to how many datapoints are in the sample; currently all datapoints
# are used, but this may not be the ideal or most efficient computation or
# fit for a timeseries... @earthgecko is checking graphite...
for i, value in enumerate(y):
projected = m * x[i] + c
error = value - projected
# errors.append(error) # @earthgecko #1310
append_error(error)
if len(errors) < 3:
return False
std_dev = scipy.std(errors)
t = (errors[-1] + errors[-2] + errors[-3]) / 3
return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def histogram_bins(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the average of the last three datapoints falls
into a histogram bin with 20 or fewer other datapoints (you'll need to
tweak that number depending on your data).
Returns True if the tail average falls into such a sparsely populated bin;
a smaller bin size means more anomalous.
"""
try:
series = scipy.array([x[1] for x in timeseries])
t = tail_avg(timeseries, second_order_resolution_seconds)
h = np.histogram(series, bins=15)
bins = h[1]
for index, bin_size in enumerate(h[0]):
if bin_size <= 20:
# Is it in the first bin?
if index == 0:
if t <= bins[0]:
return True
# Is it in the current bin?
elif t >= bins[index] and t < bins[index + 1]:
return True
return False
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def ks_test(timeseries, second_order_resolution_seconds):
"""
A timeseries is anomalous if the two-sample Kolmogorov-Smirnov test indicates
that the data distribution for the last 10 minutes is different from the last
hour. It produces false positives on non-stationary series, so the Augmented
Dickey-Fuller test is applied to check the reference window for stationarity.
"""
try:
hour_ago = time() - 3600
ten_minutes_ago = time() - 600
reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
if reference.size < 20 or probe.size < 20:
return False
ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
if ks_p_value < 0.05 and ks_d > 0.5:
adf = sm.tsa.stattools.adfuller(reference, 10)
if adf[1] < 0.05:
return True
return False
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
return False
"""
THE END of NO MAN'S LAND
THE START of UTILITY FUNCTIONS
"""
def get_function_name():
"""
This utility function is used to determine which algorithm is reporting an
algorithm error when record_algorithm_error is used.
"""
return traceback.extract_stack(None, 2)[0][2]
def record_algorithm_error(algorithm_name, traceback_format_exc_string):
"""
This utility function is used to facilitate the traceback from any algorithm
errors. The algorithm functions themselves need to run fast and must never
fail in a way that stops the function from returning, so the pythonic except
is used to "sample" any algorithm errors to a tmp file and report them once
per run rather than spewing tons of errors into the log.
.. note::
algorithm errors tmp file clean up
the algorithm error tmp files are handled and cleaned up in
:class:`Analyzer` after all the spawned processes are completed.
:param algorithm_name: the algorithm function name
:type algorithm_name: str
:param traceback_format_exc_string: the traceback_format_exc string
:type traceback_format_exc_string: str
:return:
- ``True`` the error string was written to the algorithm_error_file
- ``False`` the error string was not written to the algorithm_error_file
:rtype:
- boolean
"""
current_process_pid = getpid()
algorithm_error_file = '%s/%s.%s.%s.algorithm.error' % (
SKYLINE_TMP_DIR, skyline_app, str(current_process_pid), algorithm_name)
try:
with open(algorithm_error_file, 'w') as f:
f.write(str(traceback_format_exc_string))
return True
except:
return False
def determine_median(timeseries):
"""
Determine the median of the values in the timeseries
"""
# logger.info('Running ' + str(get_function_name()))
try:
np_array = pandas.Series([x[1] for x in timeseries])
except:
return False
try:
array_median = np.median(np_array)
return array_median
except:
return False
return False
def is_anomalously_anomalous(metric_name, ensemble, datapoint):
"""
This method runs a meta-analysis on the metric to determine whether the
metric has a past history of triggering. TODO: weight intervals based on datapoint
"""
# We do not want to trigger twice on the same datapoint
new_trigger = [time(), datapoint]
# Get the old history
raw_trigger_history = redis_conn.get('mirage_trigger_history.' + metric_name)
if not raw_trigger_history:
redis_conn.set('mirage_trigger_history.' + metric_name, packb([(time(), datapoint)]))
return True
trigger_history = unpackb(raw_trigger_history)
# Are we (probably) triggering on the same data?
if (new_trigger[1] == trigger_history[-1][1] and
new_trigger[0] - trigger_history[-1][0] <= 300):
return False
# Update the history
trigger_history.append(new_trigger)
redis_conn.set('mirage_trigger_history.' + metric_name, packb(trigger_history))
# Should we surface the anomaly?
trigger_times = [x[0] for x in trigger_history]
intervals = [
trigger_times[i + 1] - trigger_times[i]
for i, v in enumerate(trigger_times)
if (i + 1) < len(trigger_times)
]
series = pandas.Series(intervals)
mean = series.mean()
stdDev = series.std()
return abs(intervals[-1] - mean) > 3 * stdDev
def run_selected_algorithm(timeseries, metric_name, second_order_resolution_seconds):
"""
Run selected algorithms
"""
try:
ensemble = [globals()[algorithm](timeseries, second_order_resolution_seconds) for algorithm in MIRAGE_ALGORITHMS]
threshold = len(ensemble) - MIRAGE_CONSENSUS
if ensemble.count(False) <= threshold:
if MIRAGE_ENABLE_SECOND_ORDER:
if is_anomalously_anomalous(metric_name, ensemble, timeseries[-1][1]):
return True, ensemble, timeseries[-1][1]
else:
return True, ensemble, timeseries[-1][1]
return False, ensemble, timeseries[-1][1]
except:
logger.error('Algorithm error: %s' % traceback.format_exc())
return False, [], 1
```
#### File: skyline/webapp/ionosphere_backend.py
```python
from __future__ import division
import logging
from os import path, walk, listdir, remove
# import string
import operator
import time
import re
# import csv
# import datetime
import shutil
import glob
from ast import literal_eval
import traceback
from flask import request
import requests
# from redis import StrictRedis
# from sqlalchemy import (
# create_engine, Column, Table, Integer, String, MetaData, DateTime)
# from sqlalchemy.dialects.mysql import DOUBLE, TINYINT
from sqlalchemy.sql import select
# import json
# from tsfresh import __version__ as tsfresh_version
# @added 20170916 - Feature #1996: Ionosphere - matches page
from pymemcache.client.base import Client as pymemcache_Client
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
from sqlalchemy.sql import text
import settings
import skyline_version
# from skyline_functions import (
# RepresentsInt, mkdir_p, write_data_to_file, get_graphite_metric)
from skyline_functions import (mkdir_p, get_graphite_metric, write_data_to_file)
# from tsfresh_feature_names import TSFRESH_FEATURES
from database import (
get_engine, ionosphere_table_meta, metrics_table_meta,
ionosphere_matched_table_meta,
# @added 20170305 - Feature #1960: ionosphere_layers
ionosphere_layers_table_meta, layers_algorithms_table_meta,
# @added 20170307 - Feature #1960: ionosphere_layers
# To present matched layers Graphite graphs
ionosphere_layers_matched_table_meta
)
skyline_version = skyline_version.__absolute_version__
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
try:
ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
# a missing setting raises AttributeError rather than EnvironmentError
except AttributeError:
logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings')
ENABLE_WEBAPP_DEBUG = False
try:
full_duration_seconds = int(settings.FULL_DURATION)
except:
full_duration_seconds = 86400
full_duration_in_hours = full_duration_seconds / 60 / 60
exclude_redis_json = 'redis.%sh.json' % str(int(full_duration_in_hours))
def ionosphere_get_metrics_dir(requested_timestamp, context):
"""
Get a list of all the metrics in a timestamp's training data or features
profile folder
:param requested_timestamp: the training data timestamp
:param context: the request context, training_data or features_profiles
:type requested_timestamp: str
:type context: str
:return: tuple of lists
:rtype: (list, list, list, list)
"""
if context == 'training_data':
log_context = 'training data'
if context == 'features_profiles':
log_context = 'features profile data'
logger.info(
'Metrics requested from the %s dir for timestamp %s' % (
log_context, str(requested_timestamp)))
if context == 'training_data':
data_dir = '%s' % settings.IONOSPHERE_DATA_FOLDER
if context == 'features_profiles':
data_dir = '%s' % (settings.IONOSPHERE_PROFILES_FOLDER)
# @added 20160113 - Feature #1858: Ionosphere - autobuild features_profiles dir
if settings.IONOSPHERE_AUTOBUILD:
# TODO: see ionosphere docs page. Create any deleted/missing
# features_profiles dir with best effort with the data that is
# available and DB data on-demand
# Build the expected features_profiles dirs from the DB and auto
# provision any that are not present
if not path.exists(data_dir):
# provision features_profiles image resources
mkdir_p(data_dir)
metric_paths = []
metrics = []
timestamps = []
human_dates = []
for root, dirs, files in walk(data_dir):
for file in files:
if file.endswith('.json'):
data_file = True
if re.search(exclude_redis_json, file):
data_file = False
if re.search('mirage.redis.json', file):
data_file = False
if re.search(requested_timestamp, root) and data_file:
metric_name = file.replace('.json', '')
add_metric = True
metric_file = path.join(root, file)
else:
add_metric = False
if add_metric:
metric_paths.append([metric_name, root])
metrics.append(metric_name)
if context == 'training_data':
timestamp = int(root.split('/')[5])
if context == 'features_profiles':
timestamp = int(path.split(root)[1])
timestamps.append(timestamp)
set_unique_metrics = set(metrics)
unique_metrics = list(set_unique_metrics)
unique_metrics.sort()
set_unique_timestamps = set(timestamps)
unique_timestamps = list(set_unique_timestamps)
unique_timestamps.sort()
for i_ts in unique_timestamps:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(int(i_ts)))
human_dates.append(human_date)
return (metric_paths, unique_metrics, unique_timestamps, human_dates)
def ionosphere_data(requested_timestamp, data_for_metric, context):
"""
Get a list of all training data or profiles folders and metrics
:param requested_timestamp: the training data or profile timestamp
:param data_for_metric: the metric base_name
:param context: the request context, training_data or features_profiles
:type requested_timestamp: str
:type data_for_metric: str
:type context: str
:return: tuple of lists
:rtype: (list, list, list, list)
"""
base_name = data_for_metric.replace(settings.FULL_NAMESPACE, '', 1)
if context == 'training_data':
log_context = 'training data'
if context == 'features_profiles':
log_context = 'features profile data'
logger.info(
'%s requested for %s at timestamp %s' %
(log_context, str(base_name), str(requested_timestamp)))
if requested_timestamp:
timeseries_dir = base_name.replace('.', '/')
if context == 'training_data':
data_dir = '%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, requested_timestamp,
timeseries_dir)
if context == 'features_profiles':
data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, timeseries_dir,
requested_timestamp)
else:
if context == 'training_data':
data_dir = '%s' % settings.IONOSPHERE_DATA_FOLDER
if context == 'features_profiles':
data_dir = '%s' % (settings.IONOSPHERE_PROFILES_FOLDER)
metric_paths = []
metrics = []
timestamps = []
human_dates = []
if context == 'training_data':
data_dir = '%s' % settings.IONOSPHERE_DATA_FOLDER
if context == 'features_profiles':
data_dir = '%s' % settings.IONOSPHERE_PROFILES_FOLDER
for root, dirs, files in walk(data_dir):
for file in files:
if file.endswith('.json'):
data_file = True
if re.search(exclude_redis_json, file):
data_file = False
if re.search('mirage.redis.json', file):
data_file = False
if re.search('\\d{10}', root) and data_file:
metric_name = file.replace('.json', '')
if data_for_metric != 'all':
add_metric = False
if metric_name == base_name:
add_metric = True
if requested_timestamp:
if re.search(requested_timestamp, file):
add_metric = True
else:
add_metric = False
if add_metric:
metric_paths.append([metric_name, root])
metrics.append(metric_name)
if context == 'training_data':
timestamp = int(root.split('/')[5])
if context == 'features_profiles':
timestamp = int(path.split(root)[1])
timestamps.append(timestamp)
else:
metric_paths.append([metric_name, root])
metrics.append(metric_name)
if context == 'training_data':
timestamp = int(root.split('/')[5])
if context == 'features_profiles':
timestamp = int(path.split(root)[1])
timestamps.append(timestamp)
set_unique_metrics = set(metrics)
unique_metrics = list(set_unique_metrics)
unique_metrics.sort()
set_unique_timestamps = set(timestamps)
unique_timestamps = list(set_unique_timestamps)
unique_timestamps.sort()
for i_ts in unique_timestamps:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(i_ts)))
human_dates.append(human_date)
return (metric_paths, unique_metrics, unique_timestamps, human_dates)
def get_an_engine():
try:
engine, fail_msg, trace = get_engine(skyline_app)
return engine, fail_msg, trace
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get MySQL engine for'
logger.error('%s' % fail_msg)
# return None, fail_msg, trace
raise # to webapp to return in the UI
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return
def ionosphere_metric_data(requested_timestamp, data_for_metric, context, fp_id):
"""
Get a list of all training data folders and metrics
"""
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Feature #1830: Ionosphere alerts
# Use the new_load_metric_vars method
def new_load_metric_vars(metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars module object or ``False``
:rtype: list
"""
if path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
float_keys = ['value']
int_keys = ['from_timestamp', 'metric_timestamp', 'added_at', 'full_duration']
array_keys = ['algorithms', 'triggered_algorithms']
boolean_keys = ['graphite_metric', 'run_crucible_tests']
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == 'True':
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
'error :: loading metric variables from %s - none found' % (
str(metric_vars_file)))
return False
if settings.ENABLE_DEBUG:
logger.info(
'debug :: metric_vars determined - metric variable - metric - %s' % str(metric))
# @added 20170113 - Feature #1842: Ionosphere - Graphite now graphs
# Handle features profiles that were created pre the addition of
# full_duration
full_duration_present = False
for key, value in metric_vars_array:
if key == 'full_duration':
full_duration_present = True
if not full_duration_present:
try:
for key, value in metric_vars_array:
if key == 'from_timestamp':
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
use_from_timestamp = int(value_list[0])
if key == 'metric_timestamp':
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
use_metric_timestamp = int(value_list[0])
round_full_duration_days = int((use_metric_timestamp - use_from_timestamp) / 86400)
round_full_duration = int(round_full_duration_days) * 86400
logger.info('debug :: calculated missing full_duration')
metric_vars_array.append(['full_duration', round_full_duration])
except:
logger.error('error :: could not calculate missing full_duration')
metric_vars_array.append(['full_duration', 'unknown'])
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
base_name = data_for_metric.replace(settings.FULL_NAMESPACE, '', 1)
if context == 'training_data':
log_context = 'training data'
if context == 'features_profiles':
log_context = 'features profile data'
logger.info('%s requested for %s at %s' % (
context, str(base_name), str(requested_timestamp)))
metric_paths = []
images = []
timeseries_dir = base_name.replace('.', '/')
if context == 'training_data':
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(requested_timestamp),
timeseries_dir)
if context == 'features_profiles':
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, timeseries_dir,
str(requested_timestamp))
# @added 20160113 - Feature #1858: Ionosphere - autobuild features_profiles dir
if settings.IONOSPHERE_AUTOBUILD:
# TODO: see ionosphere docs page. Create any deleted/missing
# features_profiles dir with best effort with the data that is
# available and DB data on-demand
if not path.exists(metric_data_dir):
# provision features_profiles image resources
mkdir_p(metric_data_dir)
# @added 20170617 - Feature #2054: ionosphere.save.training_data
if context == 'saved_training_data':
metric_data_dir = '%s_saved/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(requested_timestamp),
timeseries_dir)
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(requested_timestamp)))
metric_var_filename = '%s.txt' % str(base_name)
metric_vars_file = False
ts_json_filename = '%s.json' % str(base_name)
ts_json_file = 'none'
# @added 20170309 - Feature #1960: ionosphere_layers
# Also return the Analyzer FULL_DURATION timeseries if available in a Mirage
# based features profile
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
ionosphere_json_filename = '%s.mirage.redis.%sh.json' % (
base_name, str(int(full_duration_in_hours)))
ionosphere_json_file = 'none'
# @added 20170308 - Feature #1960: ionosphere_layers
layers_id_matched_file = False
layers_id_matched = None
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
fp_id_matched_file = None
fp_id_matched = None
# @added 20170401 - Task #1988: Review - Ionosphere layers - added fp_details_list
# Feature #1960: ionosphere_layers
fp_created_file = None
fp_details_list = []
td_files = listdir(metric_data_dir)
for i_file in td_files:
metric_file = path.join(metric_data_dir, i_file)
metric_paths.append([i_file, metric_file])
if i_file.endswith('.png'):
# @modified 20170106 - Feature #1842: Ionosphere - Graphite now graphs
# Exclude any graphite_now png files from the images lists
append_image = True
if '.graphite_now.' in i_file:
append_image = False
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Exclude any matched.fp-id images
if '.matched.fp_id' in i_file:
append_image = False
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1852: Ionosphere - features_profile matched graphite graphs
# Exclude any matched.fp-id images
if '.matched.layers.fp_id' in i_file:
append_image = False
if append_image:
images.append(str(metric_file))
if i_file == metric_var_filename:
metric_vars_file = str(metric_file)
if i_file == ts_json_filename:
ts_json_file = str(metric_file)
# @added 20170308 - Feature #1960: ionosphere_layers
if '.layers_id_matched.layers_id' in i_file:
layers_id_matched_file = str(metric_file)
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too as it has proved useful
# in the frontend with regards to training data sets being
# matched by layers and can do the same for in the frontend
# training data for feature profile matches too.
if '.profile_id_matched.fp_id' in i_file:
fp_id_matched_file = str(metric_file)
# @added 20170401 - Task #1988: Review - Ionosphere layers - added fp_details_list
# Feature #1960: ionosphere_layers
if '.fp.created.txt' in i_file:
fp_created_file = str(metric_file)
# @added 20170309 - Feature #1960: ionosphere_layers
if i_file == ionosphere_json_filename:
ionosphere_json_file = str(metric_file)
metric_vars_ok = False
metric_vars = ['error: could not read metrics vars file', metric_vars_file]
# @added 20181114 - Bug #2684: ionosphere_backend.py - metric_vars_file not set
# Handle if the metrics_var_file has not been set and is still False so
# that the path.isfile does not error with
# TypeError: coercing to Unicode: need string or buffer, bool found
metric_vars_file_exists = False
if metric_vars_file:
try:
if path.isfile(metric_vars_file):
metric_vars_file_exists = True
except:
logger.error('error :: metric_vars_file %s was not found' % str(metric_vars_file))
# @modified 20181114 - Bug #2684: ionosphere_backend.py - metric_vars_file not set
# if path.isfile(metric_vars_file):
if metric_vars_file_exists:
try:
# @modified 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Feature #1830: Ionosphere alerts
# Use the new_load_metric_vars method
# metric_vars = []
# with open(metric_vars_file) as f:
# for line in f:
# add_line = line.replace('\n', '')
# metric_vars.append(add_line)
metric_vars = new_load_metric_vars(metric_vars_file)
metric_vars_ok = True
except:
trace = traceback.format_exc()
logger.error(trace)
metric_vars_ok = False
# logger.error(traceback.format_exc())
fail_msg = metric_vars
logger.error('%s' % fail_msg)
logger.error('error :: failed to load metric_vars from: %s' % str(metric_vars_file))
raise # to webapp to return in the UI
# TODO
# Make a sample ts for lite frontend
ts_json_ok = False
ts_json = ['error: no timeseries json file', ts_json_file]
if path.isfile(ts_json_file):
try:
# ts_json = []
with open(ts_json_file) as f:
for line in f:
ts_json.append(line)
ts_json_ok = True
except:
ts_json_ok = False
# @added 20170309 - Feature #1960: ionosphere_layers
# Also return the Analyzer FULL_DURATION timeseries if available in a Mirage
# based features profile
ionosphere_json_ok = False
ionosphere_json = False
ionosphere_json = []
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Return the anomalous_timeseries as an array to sample
anomalous_timeseries = []
if path.isfile(ionosphere_json_file):
try:
with open(ionosphere_json_file) as f:
for line in f:
ionosphere_json.append(line)
ionosphere_json_ok = True
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Return the anomalous_timeseries as an array to sample
with open((ionosphere_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
except:
ionosphere_json_ok = False
# @added 20171130 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Return the anomalous_timeseries as an array to sample and just use the
# ts_json file if there is no ionosphere_json_file
if not anomalous_timeseries:
with open((ts_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
# @added 20170308 - Feature #1960: ionosphere_layers
if layers_id_matched_file:
if path.isfile(layers_id_matched_file):
try:
with open(layers_id_matched_file) as f:
output = f.read()
layers_id_matched = int(output)
except:
layers_id_matched = False
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too as it has proved useful
# in the frontend with regards to training data sets being
# matched by layers and can do the same for in the frontend
# training data for feature profile matches too.
if fp_id_matched_file:
if path.isfile(fp_id_matched_file):
try:
with open(fp_id_matched_file) as f:
output = f.read()
fp_id_matched = int(output)
except:
fp_id_matched = False
# @added 20170401 - Task #1988: Review - Ionosphere layers - added fp_id_created
# Feature #1960: ionosphere_layers
if fp_created_file:
if path.isfile(fp_created_file):
try:
with open(fp_created_file) as f:
output = f.read()
fp_details_list = literal_eval(output)
except:
fp_details_list = None
ts_full_duration = None
if metric_vars_ok and ts_json_ok:
for key, value in metric_vars:
if key == 'full_duration':
ts_full_duration = value
data_to_process = False
if metric_vars_ok and ts_json_ok:
data_to_process = True
panorama_anomaly_id = False
# @modified 20180608 - Bug #2406: Ionosphere - panorama anomaly id lag
# Time shift the requested_timestamp by 120 seconds either way on the
# from_timestamp and until_timestamp parameters to account for any lag in the
# insertion of the anomaly by Panorama, given that Panorama only runs every
# 60 seconds and the Analyzer to Mirage to Ionosphere round trip introduces
# additional lag. Panorama will not add multiple anomalies from the same
# metric in the time window so there is no need to consider the possibility
# of multiple anomaly ids being returned.
# url = '%s/panorama?metric=%s&from_timestamp=%s&until_timestamp=%s&panorama_anomaly_id=true' % (settings.SKYLINE_URL, str(base_name), str(requested_timestamp), str(requested_timestamp))
grace_from_timestamp = int(requested_timestamp) - 120
grace_until_timestamp = int(requested_timestamp) + 120
url = '%s/panorama?metric=%s&from_timestamp=%s&until_timestamp=%s&panorama_anomaly_id=true' % (settings.SKYLINE_URL, str(base_name), str(grace_from_timestamp), str(grace_until_timestamp))
panorama_resp = None
logger.info('getting anomaly id from panorama: %s' % str(url))
if settings.WEBAPP_AUTH_ENABLED:
user = str(settings.WEBAPP_AUTH_USER)
password = str(settings.WEBAPP_AUTH_USER_PASSWORD)
try:
if settings.WEBAPP_AUTH_ENABLED:
# @modified 20181106 - Bug #2668: Increase timeout on requests panorama id
# r = requests.get(url, timeout=2, auth=(user, password))
r = requests.get(url, timeout=settings.GRAPHITE_READ_TIMEOUT, auth=(user, password))
else:
# @modified 20181106 - Bug #2668: Increase timeout on requests panorama id
# r = requests.get(url, timeout=2)
r = requests.get(url, timeout=settings.GRAPHITE_READ_TIMEOUT)
panorama_resp = True
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get anomaly id from panorama: %s' % str(url))
if panorama_resp:
try:
data = literal_eval(r.text)
if str(data) == '[]':
panorama_anomaly_id = None
logger.debug('debug :: panorama anomaly data: %s' % str(data))
else:
panorama_anomaly_id = int(data[0][0])
logger.debug('debug :: panorama anomaly data: %s' % str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get anomaly id from panorama response: %s' % str(r.text))
# @added 20170106 - Feature #1842: Ionosphere - Graphite now graphs
# Graphite now graphs at TARGET_HOURS, 24h, 7d, 30d to fully inform the
# operator about the metric.
graphite_now_images = []
graphite_now = int(time.time())
graph_resolutions = []
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Feature #1842: Ionosphere - Graphite now graphs
# Also include the Graphite NOW graphs in the features_profile page as
# graphs WHEN CREATED
# if context == 'training_data':
if context == 'training_data' or context == 'features_profiles' or context == 'saved_training_data':
graph_resolutions = [int(settings.TARGET_HOURS), 24, 168, 720]
# @modified 20170107 - Feature #1842: Ionosphere - Graphite now graphs
# Exclude if matches TARGET_HOURS - unique only
_graph_resolutions = sorted(set(graph_resolutions))
graph_resolutions = _graph_resolutions
for target_hours in graph_resolutions:
graph_image = False
try:
graph_image_file = '%s/%s.graphite_now.%sh.png' % (metric_data_dir, base_name, str(target_hours))
# These are NOW graphs, so if the graph_image_file exists, remove it
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Feature #1842: Ionosphere - Graphite now graphs
# Only remove if this is the training_data context and match on the
# graph_image_file rather than graph_image response
if context == 'training_data':
target_seconds = int((target_hours * 60) * 60)
from_timestamp = str(graphite_now - target_seconds)
until_timestamp = str(graphite_now)
if path.isfile(graph_image_file):
try:
remove(str(graph_image_file))
logger.info('graph_image_file removed - %s' % str(graph_image_file))
except OSError:
pass
logger.info('getting Graphite graph for %s hours - from_timestamp - %s, until_timestamp - %s' % (str(target_hours), str(from_timestamp), str(until_timestamp)))
graph_image = get_graphite_metric(
skyline_app, base_name, from_timestamp, until_timestamp, 'image',
graph_image_file)
# if graph_image:
if path.isfile(graph_image_file):
graphite_now_images.append(graph_image_file)
# @added 20170106 - Feature #1842: Ionosphere - Graphite now graphs
# TODO: Un/fortunately there is no simple method by which to annotate
# these Graphite NOW graphs at the anomaly timestamp, if these were
# from Grafana, yes but we cannot add Grafana as a dep :) It would
# be possible to add these using the dygraph js method ala now, then
# and Panorama, but that is BEYOND the scope of js I want to have to
# deal with. I think we can leave this up to the operator's
# neocortex to do the processing. Which may be a valid point as
# sticking a single red line vertical line in the graphs ala Etsy
# deployments https://codeascraft.com/2010/12/08/track-every-release/
# or how @andymckay does it https://blog.mozilla.org/webdev/2012/04/05/tracking-deployments-in-graphite/
# would arguably introduce a bias in this context. The neocortex
# should be able to handle this timeshifting fairly simply with a
# little practice.
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Graphite graph at %s hours for %s' % (str(target_hours), base_name))
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Get the last 9 matched timestamps for the metric and get graphite graphs
# for them
graphite_matched_images = []
matched_count = 0
if context == 'features_profiles':
logger.info('getting MySQL engine')
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('%s' % fail_msg)
logger.error('error :: could not get a MySQL engine to get fp_ids')
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
matched_timestamps = []
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Added details of match anomalies for verification added to tsfresh_version
all_calc_features_sum = None
all_calc_features_count = None
sum_common_values = None
common_features_count = None
# That is more than it looks...
try:
connection = engine.connect()
stmt = select([ionosphere_matched_table]).where(ionosphere_matched_table.c.fp_id == int(fp_id))
result = connection.execute(stmt)
for row in result:
matched_timestamp = row['metric_timestamp']
matched_timestamps.append(int(matched_timestamp))
logger.info('found matched_timestamp %s' % (str(matched_timestamp)))
connection.close()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timestamps from ionosphere_matched for fp_id %s' % str(fp_id))
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal and raise
if engine:
engine_disposal(engine)
raise
len_matched_timestamps = len(matched_timestamps)
matched_count = len_matched_timestamps
logger.info('determined %s matched timestamps for fp_id %s' % (str(len_matched_timestamps), str(fp_id)))
last_matched_timestamps = []
if len_matched_timestamps > 0:
last_graph_timestamp = int(time.time())
# skip_if_last_graph_timestamp_less_than = 600
sorted_matched_timestamps = sorted(matched_timestamps)
# get_matched_timestamps = sorted_matched_timestamps[-4:]
get_matched_timestamps = sorted_matched_timestamps[-20:]
# Order newest first
for ts in get_matched_timestamps[::-1]:
if len(get_matched_timestamps) > 4:
graph_time_diff = int(last_graph_timestamp) - int(ts)
if graph_time_diff > 600:
last_matched_timestamps.append(ts)
else:
last_matched_timestamps.append(ts)
last_graph_timestamp = int(ts)
for matched_timestamp in last_matched_timestamps:
# Get Graphite images
graph_image = False
try:
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars if var_array[0] == key]
full_duration = int(value_list[0])
from_timestamp = str(int(matched_timestamp) - int(full_duration))
until_timestamp = str(matched_timestamp)
graph_image_file = '%s/%s.matched.fp_id-%s.%s.png' % (metric_data_dir, base_name, str(fp_id), str(matched_timestamp))
if not path.isfile(graph_image_file):
logger.info('getting Graphite graph for fp_id %s matched timeseries from_timestamp - %s, until_timestamp - %s' % (str(fp_id), str(from_timestamp), str(until_timestamp)))
graph_image = get_graphite_metric(
skyline_app, base_name, from_timestamp, until_timestamp, 'image',
graph_image_file)
else:
graph_image = True
logger.info('not getting Graphite graph as it already exists - %s' % (graph_image_file))
if graph_image:
graphite_matched_images.append(graph_image_file)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Graphite graph for fp_id %s at %s' % (str(fp_id), str(matched_timestamp)))
# @added 20170308 - Feature #1960: ionosphere_layers
# Added matched layers Graphite graphs
graphite_layers_matched_images = []
layers_matched_count = 0
if context == 'features_profiles':
if not engine:
fail_msg = 'error :: no engine obtained for ionosphere_layers_matched_table'
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
try:
ionosphere_layers_matched_table, log_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_layers_matched_table OK')
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: failed to get ionosphere_layers_matched_table meta for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
layers_id_matched = []
try:
connection = engine.connect()
stmt = select([ionosphere_layers_matched_table]).where(ionosphere_layers_matched_table.c.fp_id == int(fp_id))
result = connection.execute(stmt)
for row in result:
matched_layers_id = row['layer_id']
matched_timestamp = row['anomaly_timestamp']
layers_id_matched.append([int(matched_timestamp), int(matched_layers_id)])
# logger.info('found matched_timestamp %s' % (str(matched_timestamp)))
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not determine timestamps from ionosphere_layers_matched for fp_id %s' % str(fp_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
layers_matched_count = len(layers_id_matched)
logger.info('determined %s matched layers timestamps for fp_id %s' % (str(layers_matched_count), str(fp_id)))
last_matched_layers = []
if layers_matched_count > 0:
last_graph_timestamp = int(time.time())
# skip_if_last_graph_timestamp_less_than = 600
sorted_matched_layers = sorted(layers_id_matched)
get_matched_layers = sorted_matched_layers[-20:]
# Order newest first
for matched_layer in get_matched_layers[::-1]:
if len(get_matched_layers) > 4:
graph_time_diff = int(last_graph_timestamp) - int(matched_layer[0])
if graph_time_diff > 600:
last_matched_layers.append(matched_layer)
else:
last_matched_layers.append(matched_layer)
last_graph_timestamp = int(matched_layer[0])
logger.info('determined %s matched layers timestamps for graphs for fp_id %s' % (str(len(last_matched_layers)), str(fp_id)))
for matched_layer in last_matched_layers:
# Get Graphite images
graph_image = False
matched_layer_id = None
try:
full_duration = int(settings.FULL_DURATION)
from_timestamp = str(int(matched_layer[0]) - int(full_duration))
until_timestamp = str(matched_layer[0])
matched_layer_id = str(matched_layer[1])
graph_image_file = '%s/%s.layers_id-%s.matched.layers.fp_id-%s.%s.png' % (
metric_data_dir, base_name, str(matched_layer_id),
str(fp_id), str(matched_layer[0]))
if not path.isfile(graph_image_file):
logger.info(
'getting Graphite graph for fp_id %s layer_id %s matched timeseries from_timestamp - %s, until_timestamp - %s' % (
str(fp_id), str(matched_layer_id), str(from_timestamp),
str(until_timestamp)))
graph_image = get_graphite_metric(
skyline_app, base_name, from_timestamp, until_timestamp, 'image',
graph_image_file)
else:
graph_image = True
logger.info('not getting Graphite graph as it already exists - %s' % (graph_image_file))
if graph_image:
graphite_layers_matched_images.append(graph_image_file)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Graphite graph for fp_id %s at %s' % (str(fp_id), str(matched_layer[0])))
if engine:
engine_disposal(engine)
return (
metric_paths, images, human_date, metric_vars, ts_json, data_to_process,
panorama_anomaly_id, graphite_now_images, graphite_matched_images,
matched_count,
# @added 20170308 - Feature #1960: ionosphere_layers
# Show the latest matched layers graphs as well and the matched layers_id_matched
# in the training_data page if there has been one.
graphite_layers_matched_images, layers_id_matched, ts_full_duration,
# @added 20170309 - Feature #1960: ionosphere_layers
# Also return the Analyzer FULL_DURATION timeseries if available in a Mirage
# based features profile
ionosphere_json,
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Return the anomalous_timeseries as an array to sample and fp_id_matched
anomalous_timeseries, fp_id_matched,
# @added 20170401 - Task #1988: Review - Ionosphere layers - added fp_details_list
# Feature #1960: ionosphere_layers
fp_details_list)
# @modified 20170114 - Feature #1854: Ionosphere learn
# DEPRECATED create_features_profile here as this function has been migrated in
# order to decouple the creation of features profiles from the webapp as
# ionosphere/learn now requires access to this function as well. Moved to a
# shared function in ionosphere_functions.py
# REMOVED
# def create_features_profile(requested_timestamp, data_for_metric, context):
def features_profile_details(fp_id):
"""
Get the Ionosphere details of a features profile
:param fp_id: the features profile id
:type fp_id: str
:return: tuple
:rtype: (str, boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: features_profile_details'
trace = 'none'
fail_msg = 'none'
fp_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
ionosphere_table = None
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_table meta for fp_id %s details' % str(fp_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: ionosphere_table OK' % function_str)
try:
connection = engine.connect()
stmt = select([ionosphere_table]).where(ionosphere_table.c.id == int(fp_id))
result = connection.execute(stmt)
row = result.fetchone()
fp_details_object = row
connection.close()
try:
tsfresh_version = row['tsfresh_version']
except:
tsfresh_version = 'unknown'
try:
calc_time = row['calc_time']
except:
calc_time = 'unknown'
full_duration = row['full_duration']
features_count = row['features_count']
features_sum = row['features_sum']
deleted = row['deleted']
matched_count = row['matched_count']
last_matched = row['last_matched']
if str(last_matched) == '0':
human_date = 'never matched'
else:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_matched)))
created_timestamp = row['created_timestamp']
full_duration = row['full_duration']
# @modified 20161229 - Feature #1830: Ionosphere alerts
# Added checked_count and last_checked
last_checked = row['last_checked']
if str(last_checked) == '0':
checked_human_date = 'never checked'
else:
checked_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_checked)))
checked_count = row['checked_count']
# @modified 20170114 - Feature #1854: Ionosphere learn
# Added parent_id and generation
parent_id = row['parent_id']
generation = row['generation']
# @added 20170402 - Feature #2000: Ionosphere - validated
validated = row['validated']
# @added 20170305 - Feature #1960: ionosphere_layers
layers_id = row['layers_id']
fp_details = '''
tsfresh_version :: %s | calc_time :: %s
features_count :: %s
features_sum :: %s
deleted :: %s
matched_count :: %s
last_matched :: %s | human_date :: %s
created_timestamp :: %s
full_duration :: %s
checked_count :: %s
last_checked :: %s | human_date :: %s
parent_id :: %s | generation :: %s | validated :: %s
layers_id :: %s
''' % (str(tsfresh_version), str(calc_time), str(features_count),
str(features_sum), str(deleted), str(matched_count),
str(last_matched), str(human_date), str(created_timestamp),
str(full_duration), str(checked_count), str(last_checked),
str(checked_human_date), str(parent_id), str(generation),
str(validated), str(layers_id))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get fp_id %s details from ionosphere DB table' % str(fp_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
# @modified 20170114 - Feature #1854: Ionosphere learn - generations
# Return the fp_details_object so that webapp can pass the parent_id and
# generation to the templates
# return fp_details, True, fail_msg, trace
return fp_details, True, fail_msg, trace, fp_details_object
# @added 20170118 - Feature #1862: Ionosphere features profiles search page
# Added fp_search parameter
# @modified 20170220 - Feature #1862: Ionosphere features profiles search page
def ionosphere_search(default_query, search_query):
"""
Gets the details of features profiles from the database, using the URL
arguments that are passed in by the :obj:`request.args` to build the MySQL
select query string, queries the database, parses the results and creates
an array of the features profiles that matched the query.
:param None: determined from :obj:`request.args`
:return: array
:rtype: array
"""
logger = logging.getLogger(skyline_app_logger)
import time
import datetime
function_str = 'ionosphere_backend.py :: ionosphere_search'
trace = 'none'
fail_msg = 'none'
full_duration_list = []
enabled_list = []
tsfresh_version_list = []
generation_list = []
features_profiles = []
features_profiles_count = []
# possible_options = [
# 'full_duration', 'enabled', 'tsfresh_version', 'generation', 'count']
logger.info('determining search parameters')
query_string = 'SELECT * FROM ionosphere'
# id, metric_id, full_duration, anomaly_timestamp, enabled, tsfresh_version,
# calc_time, features_sum, matched_count, last_matched, created_timestamp,
# last_checked, checked_count, parent_id, generation
needs_and = False
count_request = False
matched_count = None
checked_count = None
generation_count = None
count_by_metric = None
if 'count_by_metric' in request.args:
count_by_metric = request.args.get('count_by_metric', None)
if count_by_metric and count_by_metric != 'false':
count_request = True
count_by_metric = True
features_profiles_count = []
query_string = 'SELECT COUNT(*), metric_id FROM ionosphere GROUP BY metric_id'
else:
count_by_metric = False
count_by_matched = None
if 'count_by_matched' in request.args:
count_by_matched = request.args.get('count_by_matched', None)
if count_by_matched and count_by_matched != 'false':
count_request = True
count_by_matched = True
matched_count = []
# query_string = 'SELECT COUNT(*), id FROM ionosphere GROUP BY matched_count ORDER BY COUNT(*)'
query_string = 'SELECT matched_count, id FROM ionosphere ORDER BY matched_count'
else:
count_by_matched = False
count_by_checked = None
if 'count_by_checked' in request.args:
count_by_checked = request.args.get('count_by_checked', None)
if count_by_checked and count_by_checked != 'false':
count_request = True
count_by_checked = True
checked_count = []
# query_string = 'SELECT COUNT(*), id FROM ionosphere GROUP BY checked_count ORDER BY COUNT(*)'
query_string = 'SELECT checked_count, id FROM ionosphere ORDER BY checked_count'
else:
count_by_checked = False
count_by_generation = None
if 'count_by_generation' in request.args:
count_by_generation = request.args.get('count_by_generation', None)
if count_by_generation and count_by_generation != 'false':
count_request = True
count_by_generation = True
generation_count = []
query_string = 'SELECT COUNT(*), generation FROM ionosphere GROUP BY generation ORDER BY COUNT(*)'
else:
count_by_generation = False
get_metric_profiles = None
metric = None
if 'metric' in request.args:
metric = request.args.get('metric', None)
if metric and metric != 'all' and metric != '*':
# A count_request always takes preference over a metric
if not count_request:
get_metric_profiles = True
query_string = 'SELECT * FROM ionosphere WHERE metric_id=REPLACE_WITH_METRIC_ID'
needs_and = True
else:
new_query_string = 'SELECT * FROM ionosphere WHERE metric_id=REPLACE_WITH_METRIC_ID'
query_string = new_query_string
needs_and = True
if 'from_timestamp' in request.args:
from_timestamp = request.args.get('from_timestamp', None)
if from_timestamp and from_timestamp != 'all':
if ":" in from_timestamp:
new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
from_timestamp = str(int(new_from_timestamp))
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Validate from_timestamp
try:
validate_from_timestamp = int(from_timestamp) + 1
int_from_timestamp = validate_from_timestamp - 1
validated_from_timestamp = str(int_from_timestamp)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate from_timestamp'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, from_timestamp)
new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, validated_from_timestamp)
query_string = new_query_string
needs_and = True
else:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, from_timestamp)
new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, validated_from_timestamp)
query_string = new_query_string
needs_and = True
if 'until_timestamp' in request.args:
until_timestamp = request.args.get('until_timestamp', None)
if until_timestamp and until_timestamp != 'all':
if ":" in until_timestamp:
new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
until_timestamp = str(int(new_until_timestamp))
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Validate until_timestamp
try:
validate_until_timestamp = int(until_timestamp) + 1
int_until_timestamp = validate_until_timestamp - 1
validated_until_timestamp = str(int_until_timestamp)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate until_timestamp'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s AND anomaly_timestamp <= %s' % (query_string, until_timestamp)
new_query_string = '%s AND anomaly_timestamp <= %s' % (query_string, validated_until_timestamp)
query_string = new_query_string
needs_and = True
else:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s WHERE anomaly_timestamp <= %s' % (query_string, until_timestamp)
new_query_string = '%s WHERE anomaly_timestamp <= %s' % (query_string, validated_until_timestamp)
query_string = new_query_string
needs_and = True
if 'generation_greater_than' in request.args:
generation_greater_than = request.args.get('generation_greater_than', None)
if generation_greater_than and generation_greater_than != '0':
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Validate generation_greater_than
try:
validate_generation_greater_than = int(generation_greater_than) + 1
int_generation_greater_than = validate_generation_greater_than - 1
validated_generation_greater_than = str(int_generation_greater_than)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate generation_greater_than'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s AND generation > %s' % (query_string, generation_greater_than)
new_query_string = '%s AND generation > %s' % (query_string, validated_generation_greater_than)
query_string = new_query_string
needs_and = True
else:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s WHERE generation > %s' % (query_string, generation_greater_than)
new_query_string = '%s WHERE generation > %s' % (query_string, validated_generation_greater_than)
query_string = new_query_string
needs_and = True
# @added 20170315 - Feature #1960: ionosphere_layers
if 'layers_id_greater_than' in request.args:
layers_id_greater_than = request.args.get('layers_id_greater_than', None)
if layers_id_greater_than and layers_id_greater_than != '0':
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Validate layers_id_greater_than
try:
validate_layers_id_greater_than = int(layers_id_greater_than) + 1
int_layers_id_greater_than = validate_layers_id_greater_than - 1
validated_layers_id_greater_than = str(int_layers_id_greater_than)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate layers_id_greater_than'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s AND layers_id > %s' % (query_string, layers_id_greater_than)
new_query_string = '%s AND layers_id > %s' % (query_string, validated_layers_id_greater_than)
query_string = new_query_string
needs_and = True
else:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s WHERE layers_id > %s' % (query_string, layers_id_greater_than)
new_query_string = '%s WHERE layers_id > %s' % (query_string, validated_layers_id_greater_than)
query_string = new_query_string
needs_and = True
# @added 20170402 - Feature #2000: Ionosphere - validated
if 'validated_equals' in request.args:
validated_equals = request.args.get('validated_equals', 'any')
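# validated_equals is expected to be 'true', 'false' or 'any', 'any' adds no
# validated clause to the query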
if validated_equals == 'true':
validate_string = 'validated = 1'
if validated_equals == 'false':
validate_string = 'validated = 0'
if validated_equals != 'any':
if needs_and:
new_query_string = '%s AND %s' % (query_string, validate_string)
query_string = new_query_string
needs_and = True
else:
new_query_string = '%s WHERE %s' % (query_string, validate_string)
query_string = new_query_string
needs_and = True
# @added 20170518 - Feature #1996: Ionosphere - matches page - matched_greater_than
if 'matched_greater_than' in request.args:
matched_greater_than = request.args.get('matched_greater_than', None)
if matched_greater_than and matched_greater_than != '0':
# @added 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Validate matched_greater_than
try:
validate_matched_greater_than = int(matched_greater_than) + 1
int_matched_greater_than = validate_matched_greater_than - 1
validated_matched_greater_than = str(int_matched_greater_than)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not validate matched_greater_than'
logger.error('%s' % fail_msg)
raise
if needs_and:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s AND matched_count > %s' % (query_string, matched_greater_than)
new_query_string = '%s AND matched_count > %s' % (query_string, validated_matched_greater_than)
query_string = new_query_string
needs_and = True
else:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Use validated variable
# new_query_string = '%s WHERE matched_count > %s' % (query_string, matched_greater_than)
new_query_string = '%s WHERE matched_count > %s' % (query_string, validated_matched_greater_than)
query_string = new_query_string
needs_and = True
# @added 20170913 - Feature #2056: ionosphere - disabled_features_profiles
# Added enabled query modifier to search and display enabled or disabled
# profiles in the search_features_profiles page results.
if 'enabled' in request.args:
enabled = request.args.get('enabled', None)
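# enabled may be 'all' (no filter), 'true' (enabled = 1) or 'false' (enabled = 0)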
enabled_query = False
enabled_query_value = 1
if enabled:
if str(enabled) == 'all':
enabled_query = False
if str(enabled) == 'true':
enabled_query = True
if str(enabled) == 'false':
enabled_query = True
enabled_query_value = 0
if enabled_query:
if needs_and:
new_query_string = '%s AND enabled = %s' % (query_string, str(enabled_query_value))
query_string = new_query_string
else:
new_query_string = '%s WHERE enabled = %s' % (query_string, str(enabled_query_value))
query_string = new_query_string
needs_and = True
# @modified 20180414 - Feature #1862: Ionosphere features profiles search page
# Branch #2270: luminosity
# Moved from being just above metrics = [] below as required to determine
# metric_like queries
engine_needed = True
engine = None
if engine_needed:
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('metrics_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get metrics_table meta')
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
# @added 20180414 - Feature #1862: Ionosphere features profiles search page
# Branch #2270: luminosity
if 'metric_like' in request.args:
metric_like_str = request.args.get('metric_like', 'all')
if metric_like_str != 'all':
# SQLAlchemy requires the MySQL wildcard % to be %% to prevent
# interpreting the % as a printf-like format character
python_escaped_metric_like = metric_like_str.replace('%', '%%')
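# Note: python_escaped_metric_like is retained from the previous interpolated
# query, the parameterised query below binds metric_like_str directly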
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# Change the query
# nosec to exclude from bandit tests
# metrics_like_query = 'SELECT id FROM metrics WHERE metric LIKE \'%s\'' % (str(python_escaped_metric_like)) # nosec
# logger.info('executing metrics_like_query - %s' % metrics_like_query)
like_string_var = str(metric_like_str)
metrics_like_query = text("""SELECT id FROM metrics WHERE metric LIKE :like_string""")
metric_ids = ''
try:
connection = engine.connect()
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# results = connection.execute(metrics_like_query)
results = connection.execute(metrics_like_query, like_string=metric_like_str)
connection.close()
for row in results:
metric_id = str(row[0])
if metric_ids == '':
metric_ids = '%s' % (metric_id)
else:
new_metric_ids = '%s, %s' % (metric_ids, metric_id)
metric_ids = new_metric_ids
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine ids from metrics table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
if needs_and:
new_query_string = '%s AND metric_id IN (%s)' % (query_string, str(metric_ids))
query_string = new_query_string
else:
new_query_string = '%s WHERE metric_id IN (%s)' % (query_string, str(metric_ids))
query_string = new_query_string
needs_and = True
ordered_by = None
if 'order' in request.args:
order = request.args.get('order', 'DESC')
if str(order) == 'DESC':
ordered_by = 'DESC'
if str(order) == 'ASC':
ordered_by = 'ASC'
if ordered_by:
if count_request and search_query:
new_query_string = '%s %s' % (query_string, ordered_by)
else:
new_query_string = '%s ORDER BY id %s' % (query_string, ordered_by)
query_string = new_query_string
if 'limit' in request.args:
limit = request.args.get('limit', '30')
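# limit must be an integer, a limit of 0 leaves the query unlimited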
try:
validate_limit = int(limit) + 0
if int(limit) != 0:
# @modified 20190116 - Mutliple SQL Injection Security Vulnerabilities #86
# Bug #2818: Mutliple SQL Injection Security Vulnerabilities
# new_query_string = '%s LIMIT %s' % (query_string, str(limit))
new_query_string = '%s LIMIT %s' % (query_string, str(validate_limit))
query_string = new_query_string
except:
logger.error('error :: limit is not an integer - %s' % str(limit))
metrics = []
try:
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.id != 0)
result = connection.execute(stmt)
for row in result:
metric_id = int(row['id'])
metric_name = str(row['metric'])
metrics.append([metric_id, metric_name])
connection.close()
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: could not determine metrics from metrics table'
logger.error('%s' % fail_msg)
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal and raise
if engine:
engine_disposal(engine)
raise
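# Resolve the requested metric name to its metrics table id and substitute it
# into the query string placeholder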
if get_metric_profiles:
metrics_id = None
for metric_obj in metrics:
if metrics_id:
break
if metric == str(metric_obj[1]):
metrics_id = str(metric_obj[0])
new_query_string = query_string.replace('REPLACE_WITH_METRIC_ID', metrics_id)
query_string = new_query_string
logger.debug('debug :: query_string - %s' % query_string)
ionosphere_table = None
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_table meta for options'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
logger.info('%s :: ionosphere_table OK' % function_str)
all_fps = []
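# all_fps holds one list per feature profile row from the ionosphere table and
# is used to resolve feature profile ids to metric names for the count views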
try:
connection = engine.connect()
stmt = select([ionosphere_table]).where(ionosphere_table.c.id != 0)
result = connection.execute(stmt)
for row in result:
try:
fp_id = int(row['id'])
fp_metric_id = int(row['metric_id'])
for metric_obj in metrics:
if fp_metric_id == int(metric_obj[0]):
fp_metric = metric_obj[1]
break
full_duration = int(row['full_duration'])
anomaly_timestamp = int(row['anomaly_timestamp'])
tsfresh_version = str(row['tsfresh_version'])
# These handle MySQL NULL
try:
calc_time = float(row['calc_time'])
except:
calc_time = 0
try:
features_count = int(row['features_count'])
except:
features_count = 0
try:
features_sum = float(row['features_sum'])
except:
features_sum = 0
try:
deleted = int(row['deleted'])
except:
deleted = 0
fp_matched_count = int(row['matched_count'])
last_matched = int(row['last_matched'])
if str(last_matched) == '0':
human_date = 'never matched'
else:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_matched)))
created_timestamp = str(row['created_timestamp'])
last_checked = int(row['last_checked'])
if str(last_checked) == '0':
checked_human_date = 'never checked'
else:
checked_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_checked)))
fp_checked_count = int(row['checked_count'])
fp_parent_id = int(row['parent_id'])
fp_generation = int(row['generation'])
# @added 20170402 - Feature #2000: Ionosphere - validated
fp_validated = int(row['validated'])
all_fps.append([fp_id, fp_metric_id, str(fp_metric), full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated])
# logger.info('%s :: %s feature profiles found' % (function_str, str(len(all_fps))))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
logger.error('error :: bad row data')
connection.close()
all_fps.sort(key=operator.itemgetter(int(0)))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
logger.error('error :: could not determine feature profile rows from the ionosphere table')
raise
if count_request and search_query:
features_profiles = None
features_profiles_count = None
full_duration_list = None
enabled_list = None
tsfresh_version_list = None
generation_list = None
if count_by_metric and search_query:
features_profiles_count = []
if engine_needed and engine:
try:
stmt = query_string
connection = engine.connect()
for row in engine.execute(stmt):
fp_count = int(row[0])
fp_metric_id = int(row['metric_id'])
for metric_obj in metrics:
if fp_metric_id == metric_obj[0]:
fp_metric = metric_obj[1]
break
features_profiles_count.append([fp_count, fp_metric_id, str(fp_metric)])
connection.close()
logger.info('%s :: features_profiles_count %s' % (function_str, str(len(features_profiles_count))))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to count features profiles'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
features_profiles_count.sort(key=operator.itemgetter(int(0)))
if count_request and search_query:
if not count_by_metric:
if engine_needed and engine:
try:
stmt = query_string
connection = engine.connect()
for row in engine.execute(stmt):
item_count = int(row[0])
item_id = int(row[1])
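# For matched and checked counts the second column is a feature profile id,
# for generation counts it is the generation number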
if count_by_matched or count_by_checked:
for fp_obj in all_fps:
if item_id == fp_obj[0]:
metric_name = fp_obj[2]
break
if count_by_matched:
matched_count.append([item_count, item_id, metric_name])
if count_by_checked:
checked_count.append([item_count, item_id, metric_name])
if count_by_generation:
generation_count.append([item_count, item_id])
connection.close()
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to execute the count query against the ionosphere table'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
if count_request and search_query:
if engine:
engine_disposal(engine)
# @modified 20170809 - Bug #2136: Analyzer stalling on no metrics
# Added except to all del methods to prevent stalling if any object does
# not exist
try:
del all_fps
except:
logger.error('error :: failed to del all_fps')
try:
del metrics
except:
logger.error('error :: failed to del metrics')
search_success = True
return (features_profiles, features_profiles_count, matched_count,
checked_count, generation_count, full_duration_list,
enabled_list, tsfresh_version_list, generation_list,
search_success, fail_msg, trace)
features_profiles = []
# @added 20170322 - Feature #1960: ionosphere_layers
# Added layers information to the features_profiles items
layers_present = False
if engine_needed and engine and search_query:
try:
connection = engine.connect()
if get_metric_profiles:
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == int(metric_id))
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == int(metrics_id))
logger.debug('debug :: stmt - is abstracted')
else:
stmt = query_string
logger.debug('debug :: stmt - %s' % stmt)
try:
result = connection.execute(stmt)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: MySQL query failed'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
for row in result:
try:
fp_id = int(row['id'])
metric_id = int(row['metric_id'])
for metric_obj in metrics:
if metric_id == int(metric_obj[0]):
metric = metric_obj[1]
break
full_duration = int(row['full_duration'])
anomaly_timestamp = int(row['anomaly_timestamp'])
tsfresh_version = str(row['tsfresh_version'])
# These handle MySQL NULL
try:
calc_time = float(row['calc_time'])
except:
calc_time = 0
try:
features_count = int(row['features_count'])
except:
features_count = 0
try:
features_sum = float(row['features_sum'])
except:
features_sum = 0
try:
deleted = int(row['deleted'])
except:
deleted = 0
fp_matched_count = int(row['matched_count'])
last_matched = int(row['last_matched'])
if str(last_matched) == '0':
human_date = 'never matched'
else:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_matched)))
created_timestamp = str(row['created_timestamp'])
last_checked = int(row['last_checked'])
if str(last_checked) == '0':
checked_human_date = 'never checked'
else:
checked_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_checked)))
fp_checked_count = int(row['checked_count'])
fp_parent_id = int(row['parent_id'])
fp_generation = int(row['generation'])
# @added 20170402 - Feature #2000: Ionosphere - validated
fp_validated = int(row['validated'])
fp_layers_id = int(row['layers_id'])
# @added 20170322 - Feature #1960: ionosphere_layers
# Added layers information to the features_profiles items
if fp_layers_id > 0:
layers_present = True
# @modified 20180812 - Feature #2430: Ionosphere validate learnt features profiles page
# Fix bug and make this function output useable to
# get_features_profiles_to_validate
append_to_features_profile_list = True
if 'validated_equals' in request.args:
validated_equals = request.args.get('validated_equals', 'any')
else:
validated_equals = 'any'
if validated_equals == 'false':
if fp_validated == 1:
append_to_features_profile_list = False
if append_to_features_profile_list:
features_profiles.append([fp_id, metric_id, str(metric), full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id])
# @added 20170912 - Feature #2056: ionosphere - disabled_features_profiles
features_profile_enabled = int(row['enabled'])
if features_profile_enabled == 1:
enabled_list.append(fp_id)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
logger.error('error :: bad row data')
connection.close()
features_profiles.sort(key=operator.itemgetter(int(0)))
logger.debug('debug :: features_profiles length - %s' % str(len(features_profiles)))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_table data'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# @added 20170322 - Feature #1960: ionosphere_layers
# Added layers information to the features_profiles items
features_profiles_layers = []
if features_profiles and layers_present:
try:
ionosphere_layers_table, log_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_layers OK')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_layers meta'
logger.error('%s' % fail_msg)
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
try:
connection = engine.connect()
if get_metric_profiles:
stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.metric_id == int(metrics_id))
# logger.debug('debug :: stmt - is abstracted')
else:
layers_query_string = 'SELECT * FROM ionosphere_layers'
stmt = layers_query_string
# logger.debug('debug :: stmt - %s' % stmt)
result = connection.execute(stmt)
for row in result:
try:
layer_id = int(row['id'])
fp_id = int(row['fp_id'])
layer_matched_count = int(row['matched_count'])
layer_last_matched = int(row['last_matched'])
if str(layer_last_matched) == '0':
layer_human_date = 'never matched'
else:
layer_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(layer_last_matched)))
layer_last_checked = int(row['last_checked'])
# @modified 20170924 - Feature #2170: Ionosphere - validated matches
# Fixed variable typo which resulted in layer last checked
# field showing 1970-01-01 00:00:00 UTC (Thursday)
# if str(last_checked) == '0':
if str(layer_last_checked) == '0':
layer_checked_human_date = 'never checked'
else:
layer_checked_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(layer_last_checked)))
layer_check_count = int(row['check_count'])
layer_label = str(row['label'])
features_profiles_layers.append([layer_id, fp_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label])
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
logger.error('error :: bad row data')
connection.close()
features_profiles_layers.sort(key=operator.itemgetter(int(0)))
logger.debug('debug :: features_profiles_layers length - %s' % str(len(features_profiles_layers)))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_layers_table data'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# Add the layers information to the features_profiles list
features_profiles_and_layers = []
if features_profiles:
# @modified 20170402 - Feature #2000: Ionosphere - validated
for fp_id, metric_id, metric, full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id in features_profiles:
default_values = True
# @modified 20180816 - Feature #2430: Ionosphere validate learnt features profiles page
# Moved default_values to before the evalution as it was found
# that sometimes the features_profiles had 19 elements if a
# features profile had no layer or 23 elements if there was a
# layer
if default_values:
layer_id = 0
layer_matched_count = 0
layer_human_date = 'none'
layer_check_count = 0
layer_checked_human_date = 'none'
layer_label = 'none'
if int(fp_layers_id) > 0:
for layer_id, layer_fp_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label in features_profiles_layers:
if int(fp_layers_id) == int(layer_id):
default_values = False
break
features_profiles_and_layers.append([fp_id, metric_id, metric, full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label])
old_features_profile_list = features_profiles
features_profiles = features_profiles_and_layers
full_duration_list = None
# @modified 20170912 - Feature #2056: ionosphere - disabled_features_profiles
# enabled_list = None
if not enabled_list:
enabled_list = None
tsfresh_version_list = None
generation_list = None
if engine:
engine_disposal(engine)
try:
del all_fps
except:
logger.error('error :: failed to del all_fps')
try:
del metrics
except:
logger.error('error :: failed to del metrics')
search_success = True
return (features_profiles, features_profiles_count, matched_count,
checked_count, generation_count, full_duration_list,
enabled_list, tsfresh_version_list, generation_list,
search_success, fail_msg, trace)
get_options = [
'full_duration', 'enabled', 'tsfresh_version', 'generation']
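# For the default query, collect the distinct values of each option column
# from the enabled feature profiles to return as the option lists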
if engine_needed and engine and default_query:
for required_option in get_options:
all_list = []
# required_option = 'full_duration'
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT %s FROM ionosphere WHERE enabled=1' % str(required_option) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
value = row[str(required_option)]
all_list.append(value)
connection.close()
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to determine %s values from the ionosphere table' % str(required_option)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
if required_option == 'full_duration':
full_duration_list = set(all_list)
if required_option == 'enabled':
enabled_list = set(all_list)
if required_option == 'tsfresh_version':
tsfresh_version_list = set(all_list)
if required_option == 'generation':
generation_list = set(all_list)
if engine:
engine_disposal(engine)
try:
del all_fps
except:
logger.error('error :: failed to del all_fps')
try:
del metrics
except:
logger.error('error :: failed to del metrics')
search_success = True
return (features_profiles, features_profiles_count, matched_count,
checked_count, generation_count, full_duration_list,
enabled_list, tsfresh_version_list, generation_list, search_success,
fail_msg, trace)
# @added 20170305 - Feature #1960: ionosphere_layers
def create_ionosphere_layers(base_name, fp_id, requested_timestamp):
"""
Create a layers profile.
:param None: determined from :obj:`request.args`
:return: array
:rtype: array
"""
function_str = 'ionosphere_backend.py :: create_ionosphere_layers'
trace = 'none'
fail_msg = 'none'
layers_algorithms = None
layers_added = None
value_conditions = ['<', '>', '==', '!=', '<=', '>=']
conditions = ['<', '>', '==', '!=', '<=', '>=', 'in', 'not in']
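# The D and D1 layers accept any condition, the E layer is restricted to the
# value_conditions subset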
if 'd_condition' in request.args:
d_condition = request.args.get('d_condition', '==')
else:
logger.error('no d_condition argument passed')
fail_msg = 'error :: no d_condition argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if not str(d_condition) in conditions:
logger.error('d_condition not a valid condition - %s' % str(d_condition))
fail_msg = 'error :: d_condition not a valid condition - %s' % str(d_condition)
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'd_boundary_limit' in request.args:
d_boundary_limit = request.args.get('d_boundary_limit', '0')
else:
logger.error('no d_boundary_limit argument passed')
fail_msg = 'error :: no d_boundary_limit argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# test_d_boundary_limit = int(d_boundary_limit) + 1
test_d_boundary_limit = float(d_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d_boundary_limit is not a float'
return False, False, layers_algorithms, layers_added, fail_msg, trace
# @modified 20160315 - Feature #1972: ionosphere_layers - use D layer boundary for upper limit
# Added d_boundary_times
if 'd_boundary_times' in request.args:
d_boundary_times = request.args.get('d_boundary_times', '1')
else:
logger.error('no d_boundary_times argument passed')
fail_msg = 'error :: no d_boundary_times argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_d_boundary_times = int(d_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d_boundary_times is not an int'
return False, False, layers_algorithms, layers_added, fail_msg, trace
# @added 20170616 - Feature #2048: D1 ionosphere layer
if 'd1_condition' in request.args:
d1_condition = request.args.get('d1_condition', 'none')
else:
logger.error('no d1_condition argument passed')
fail_msg = 'error :: no d1_condition argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if str(d1_condition) == 'none':
d1_condition = 'none'
d1_boundary_limit = 0
d1_boundary_times = 0
else:
if not str(d1_condition) in conditions:
logger.error('d1_condition not a valid condition - %s' % str(d1_condition))
fail_msg = 'error :: d1_condition not a valid condition - %s' % str(d1_condition)
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'd1_boundary_limit' in request.args:
d1_boundary_limit = request.args.get('d1_boundary_limit', '0')
else:
logger.error('no d1_boundary_limit argument passed')
fail_msg = 'error :: no d1_boundary_limit argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_d1_boundary_limit = float(d1_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d1_boundary_limit is not a float'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'd1_boundary_times' in request.args:
d1_boundary_times = request.args.get('d1_boundary_times', '1')
else:
logger.error('no d1_boundary_times argument passed')
fail_msg = 'error :: no d1_boundary_times argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_d1_boundary_times = int(d1_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d1_boundary_times is not an int'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'e_condition' in request.args:
e_condition = request.args.get('e_condition', None)
else:
logger.error('no e_condition argument passed')
fail_msg = 'error :: no e_condition argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if not str(e_condition) in value_conditions:
logger.error('e_condition not a valid value condition - %s' % str(e_condition))
fail_msg = 'error :: e_condition not a valid value condition - %s' % str(e_condition)
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'e_boundary_limit' in request.args:
e_boundary_limit = request.args.get('e_boundary_limit')
else:
logger.error('no e_boundary_limit argument passed')
fail_msg = 'error :: no e_boundary_limit argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# test_e_boundary_limit = int(e_boundary_limit) + 1
test_e_boundary_limit = float(e_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: e_boundary_limit is not a float'
return False, False, layers_algorithms, layers_added, fail_msg, trace
if 'e_boundary_times' in request.args:
e_boundary_times = request.args.get('e_boundary_times')
else:
logger.error('no e_boundary_times argument passed')
fail_msg = 'error :: no e_boundary_times argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
try:
test_e_boundary_times = int(e_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: e_boundary_times is not an int'
return False, False, layers_algorithms, layers_added, fail_msg, trace
es_layer = False
if 'es_layer' in request.args:
es_layer_arg = request.args.get('es_layer')
if es_layer_arg == 'true':
es_layer = True
if es_layer:
es_day = None
if 'es_day' in request.args:
es_day = request.args.get('es_day')
else:
logger.error('no es_day argument passed')
fail_msg = 'error :: no es_day argument passed'
return False, False, layers_algorithms, layers_added, fail_msg, trace
f1_layer = False
if 'f1_layer' in request.args:
f1_layer_arg = request.args.get('f1_layer')
if f1_layer_arg == 'true':
f1_layer = True
if f1_layer:
from_time = None
valid_f1_from_time = False
if 'from_time' in request.args:
from_time = request.args.get('from_time')
if from_time:
values_valid = True
if len(from_time) == 4:
for digit in from_time:
try:
int(digit) + 1
except:
values_valid = False
if values_valid:
if int(from_time) < 2400:
valid_f1_from_time = True
if not valid_f1_from_time:
logger.error('no valid f1_layer from_time argument passed - %s' % str(from_time))
fail_msg = 'error :: no valid f1_layer from_time argument passed - %s' % str(from_time)
return False, False, layers_algorithms, layers_added, fail_msg, trace
f2_layer = False
if 'f2_layer' in request.args:
f2_layer_arg = request.args.get('f2_layer')
if f2_layer_arg == 'true':
f2_layer = True
if f2_layer:
until_time = None
valid_f2_until_time = False
if 'until_time' in request.args:
until_time = request.args.get('until_time')
if until_time:
values_valid = True
if len(until_time) == 4:
for digit in until_time:
try:
int(digit) + 1
except:
values_valid = False
if values_valid:
if int(until_time) < 2400:
valid_f2_until_time = True
if not valid_f2_until_time:
logger.error('no valid f2_layer until_time argument passed - %s' % str(until_time))
fail_msg = 'error :: no valid f2_layer until_time argument passed - %s' % str(until_time)
return False, False, layers_algorithms, layers_added, fail_msg, trace
label = False
if 'fp_layer_label' in request.args:
label_arg = request.args.get('fp_layer_label')
label = label_arg[:255]
engine_needed = True
engine = None
if engine_needed:
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('metrics_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get metrics_table meta')
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metrics_id = 0
try:
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
for row in result:
metrics_id = int(row['id'])
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not determine metric id from metrics table'
if engine:
engine_disposal(engine)
raise
# Create layer profile
ionosphere_layers_table = None
try:
ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_layers_table meta for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
layer_id = 0
try:
connection = engine.connect()
stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.fp_id == fp_id)
result = connection.execute(stmt)
for row in result:
layer_id = int(row['id'])
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not determine id from ionosphere_layers_table'
if engine:
engine_disposal(engine)
raise
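# If a layers profile already exists for this fp_id return its id rather than
# creating a duplicate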
if layer_id > 0:
return layer_id, True, None, None, fail_msg, trace
new_layer_id = False
try:
connection = engine.connect()
ins = ionosphere_layers_table.insert().values(
fp_id=fp_id, metric_id=int(metrics_id), enabled=1, label=label)
result = connection.execute(ins)
connection.close()
new_layer_id = result.inserted_primary_key[0]
logger.info('new ionosphere layer_id: %s' % str(new_layer_id))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new record into the ionosphere_layers table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# Create layer profile
layers_algorithms_table = None
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get layers_algorithms_table meta for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
new_layer_algorithm_ids = []
layers_added = []
# D layer
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='D', type='value', condition=d_condition,
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# layer_boundary=int(d_boundary_limit),
layer_boundary=str(d_boundary_limit),
# @modified 20160315 - Feature #1972: ionosphere_layers - use D layer boundary for upper limit
# Added d_boundary_times
times_in_row=int(d_boundary_times))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms D layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('D')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new D layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# E layer
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='E', type='value', condition=e_condition,
# @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats
# layer_boundary=int(e_boundary_limit),
layer_boundary=str(e_boundary_limit),
times_in_row=int(e_boundary_times))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms E layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('E')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new E layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# @added 20170616 - Feature #2048: D1 ionosphere layer
# This must be the third created algorithm layer as in the frontend list
# D is [0], E is [1], so D1 has to be [2]
if d1_condition:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='D1', type='value', condition=d1_condition,
layer_boundary=str(d1_boundary_limit),
times_in_row=int(d1_boundary_times))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms D1 layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('D1')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new D1 layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# Es layer
if es_layer:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='Es', type='day', condition='in', layer_boundary=es_day)
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms Es layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('Es')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new Es layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# F1 layer
if f1_layer:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='F1', type='time', condition='>',
layer_boundary=str(from_time))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms F1 layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('F1')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new F1 layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# F2 layer
if f2_layer:
try:
connection = engine.connect()
ins = layers_algorithms_table.insert().values(
layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id),
layer='F2', type='time', condition='<',
layer_boundary=str(until_time))
result = connection.execute(ins)
connection.close()
new_layer_algorithm_id = result.inserted_primary_key[0]
logger.info('new ionosphere_algorithms F2 layer id: %s' % str(new_layer_algorithm_id))
new_layer_algorithm_ids.append(new_layer_algorithm_id)
layers_added.append('F2')
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to insert a new F2 layer record into the layers_algorithms table for %s' % base_name
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
ionosphere_table = None
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_table meta'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
logger.info('%s :: ionosphere_table OK' % function_str)
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(layers_id=new_layer_id))
connection.close()
logger.info('updated layers_id for %s' % str(fp_id))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: could not update layers_id for %s ' % str(fp_id)
logger.error(fail_msg)
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal
if engine:
engine_disposal(engine)
raise
if engine:
engine_disposal(engine)
return new_layer_id, True, layers_added, new_layer_algorithm_ids, fail_msg, trace
def feature_profile_layers_detail(fp_layers_id):
"""
Get the Ionosphere layers details of a features profile
:param fp_layers_id: the features profile layers_id
:type fp_layers_id: str
:return: tuple
:rtype: (str, boolean, str, str, object)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: features_profile_layers_details'
trace = 'none'
fail_msg = 'none'
# fp_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
ionosphere_layers_table = None
try:
ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_layers_table meta for fp_id %s details' % str(fp_layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: ionosphere_layers_table OK' % function_str)
try:
connection = engine.connect()
stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.id == int(fp_layers_id))
result = connection.execute(stmt)
row = result.fetchone()
layer_details_object = row
connection.close()
feature_profile_id = row['fp_id']
metric_id = row['metric_id']
enabled = row['enabled']
deleted = row['deleted']
matched_count = row['matched_count']
last_matched = row['last_matched']
if str(last_matched) == '0':
human_date = 'never matched'
else:
human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_matched)))
created_timestamp = row['created_timestamp']
last_checked = row['last_checked']
if str(last_checked) == '0':
checked_human_date = 'never checked'
else:
checked_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(last_checked)))
check_count = row['check_count']
label = row['label']
layer_details = '''
fp_id :: %s | metric_id :: %s
enabled :: %s
deleted :: %s
matched_count :: %s
last_matched :: %s | human_date :: %s
created_timestamp :: %s
checked_count :: %s
last_checked :: %s | human_date :: %s
label :: %s
''' % (str(feature_profile_id), str(metric_id), str(enabled), str(deleted),
str(matched_count), str(last_matched), str(human_date),
str(created_timestamp), str(check_count),
str(last_checked), str(checked_human_date), str(label))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers_id %s details from ionosphere_layers DB table' % str(fp_layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
return layer_details, True, fail_msg, trace, layer_details_object
def feature_profile_layer_alogrithms(fp_layers_id):
"""
Get the Ionosphere layer algorithm details of a layer
:param fp_layers_id: the features profile layers_id
:type fp_layers_id: str
:return: tuple
:rtype: (str, boolean, str, str, object)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: features_profile_layer_algorithms'
trace = 'none'
fail_msg = 'none'
# fp_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
layers_algorithms_table = None
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get layers_algorithms_table meta for fp_id %s details' % str(fp_layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: layers_algorithms_table OK' % function_str)
es_condition = None
es_day = None
es_layer = ' [\'NOT ACTIVE - Es layer not created\']'
f1_from_time = None
f1_layer = ' [\'NOT ACTIVE - F1 layer not created\']'
f2_until_time = None
f2_layer = ' [\'NOT ACTIVE - F2 layer not created\']'
# @added 20170616 - Feature #2048: D1 ionosphere layer
d1_layer = ' [\'NOT ACTIVE - D1 layer not created\']'
d1_condition = 'none'
d1_boundary_limit = 'none'
d1_boundary_times = 'none'
try:
connection = engine.connect()
stmt = select([layers_algorithms_table]).where(layers_algorithms_table.c.layer_id == int(fp_layers_id))
result = connection.execute(stmt)
connection.close()
layer_algorithms_details_object = result
layer_active = '[\'ACTIVE\']'
for row in result:
layer = row['layer']
if layer == 'D':
d_condition = row['condition']
d_boundary_limit = row['layer_boundary']
# @added 20170616 - Feature #2048: D1 ionosphere layer
if layer == 'D1':
d1_condition = row['condition']
if str(d1_condition) != 'none':
d1_condition = row['condition']
d1_layer = ' [\'ACTIVE\']'
d1_boundary_limit = row['layer_boundary']
d1_boundary_times = row['times_in_row']
else:
d1_condition = 'none'
if layer == 'E':
e_condition = row['condition']
e_boundary_limit = row['layer_boundary']
e_boundary_times = row['times_in_row']
if layer == 'Es':
es_condition = row['condition']
es_day = row['layer_boundary']
es_layer = layer_active
if layer == 'F1':
f1_from_time = row['layer_boundary']
f1_layer = layer_active
if layer == 'F2':
f2_until_time = row['layer_boundary']
f2_layer = layer_active
layer_algorithms_details = '''
D layer :: if value %s %s :: [do not check] :: ['ACTIVE']
D1 layer :: if value %s %s in last %s values :: [do not check] :: %s
E layer :: if value %s %s in last %s values :: [not_anomalous, if active Es, F1 and F2 layers match] :: ['ACTIVE']
Es layer :: if day %s %s :: [not_anomalous, if active F1 and F2 layers match] :: %s
F1 layer :: if from_time > %s :: [not_anomalous, if active F2 layer matches] :: %s
F2 layer :: if until_time < %s :: [not_anomalous] :: %s
''' % (str(d_condition), str(d_boundary_limit), str(d1_condition),
str(d1_boundary_limit), str(d1_boundary_times), str(d1_layer),
str(e_condition), str(e_boundary_limit), str(e_boundary_times),
str(es_condition), str(es_day),
str(es_layer), str(f1_from_time), str(f1_layer), str(f2_until_time),
str(f2_layer))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers_algorithms for layer_id %s from layers_algorithms DB table' % str(fp_layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
return layer_algorithms_details, True, fail_msg, trace, layer_algorithms_details_object
# @added 20170308 - Feature #1960: ionosphere_layers
# To present the operator with the existing layers and algorithms for the metric
def metric_layers_alogrithms(base_name):
"""
Get the Ionosphere layer algorithm details of a metric
:param base_name: the metric base_name
:type base_name: str
:return: tuple
:rtype: (list, list, int, int, boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: metric_layers_alogrithms'
trace = 'none'
fail_msg = 'none'
metric_layers_algorithm_details = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise # to webapp to return in the UI
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('metrics_table OK')
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: failed to get metrics_table meta'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metric_id = 0
try:
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
connection.close()
for row in result:
metric_id = int(row['id'])
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: failed to get id for %s from metrics table' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if not metric_id:
# @added 20181024 - Bug #2638: anomalies db table - anomalous_datapoint greater than DECIMAL
# For debugging
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: no id for %s' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
ionosphere_layers_table = None
try:
ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get ionosphere_layers_table meta for %s details' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metric_layers_details = []
metric_layers_count = 0
metric_layers_matched_count = 0
try:
connection = engine.connect()
stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.metric_id == metric_id)
result = connection.execute(stmt)
connection.close()
for row in result:
try:
l_id = row['id']
l_fp_id = row['fp_id']
l_metric_id = row['metric_id']
l_matched_count = row['matched_count']
l_check_count = row['check_count']
l_label = str(row['label'])
metric_layers_details.append([l_id, l_fp_id, l_metric_id, l_matched_count, l_check_count, l_label])
metric_layers_count += 1
metric_layers_matched_count += int(l_matched_count)
logger.info('%s :: added layer id %s to layer count' % (function_str, str(l_id)))
except:
metric_layers_count += 0
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers ids for metric_id %s from ionosphere_layers DB table' % str(metric_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
layers_algorithms_table = None
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to get layers_algorithms_table meta for base_name %s details' % str(base_name)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
metric_layers_algorithm_details = []
logger.info('%s :: layers_algorithms_table OK' % function_str)
try:
connection = engine.connect()
stmt = select([layers_algorithms_table]).where(layers_algorithms_table.c.metric_id == metric_id)
result = connection.execute(stmt)
connection.close()
for row in result:
la_id = row['id']
la_layer_id = row['layer_id']
la_fp_id = row['fp_id']
la_metric_id = row['metric_id']
la_layer = str(row['layer'])
la_type = str(row['type'])
la_condition = str(row['condition'])
la_layer_boundary = str(row['layer_boundary'])
la_times_in_a_row = row['times_in_row']
metric_layers_algorithm_details.append([la_id, la_layer_id, la_fp_id, la_metric_id, la_layer, la_type, la_condition, la_layer_boundary, la_times_in_a_row])
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers_algorithms for metric_id %s from layers_algorithms DB table' % str(metric_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
logger.info('metric_layers_details :: %s' % str(metric_layers_details))
logger.info('metric_layers_algorithm_details :: %s' % str(metric_layers_algorithm_details))
return metric_layers_details, metric_layers_algorithm_details, metric_layers_count, metric_layers_matched_count, True, fail_msg, trace
# @added 20170327 - Feature #2004: Ionosphere layers - edit_layers
# Task #2002: Review and correct incorrectly defined layers
def edit_ionosphere_layers(layers_id):
"""
Edit a layers profile.
:param layers_id: the layer id to edit
:return: tuple of (success, fail_msg, trace)
:rtype: tuple
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: edit_ionosphere_layers'
logger.info('updating layers for %s' % str(layers_id))
trace = 'none'
fail_msg = 'none'
value_conditions = ['<', '>', '==', '!=', '<=', '>=']
conditions = ['<', '>', '==', '!=', '<=', '>=', 'in', 'not in']
if 'd_condition' in request.args:
d_condition = request.args.get('d_condition', '==')
else:
logger.error('no d_condition argument passed')
fail_msg = 'error :: no d_condition argument passed'
return False, fail_msg, trace
if not str(d_condition) in conditions:
logger.error('d_condition not a valid condition - %s' % str(d_condition))
fail_msg = 'error :: d_condition not a valid condition - %s' % str(d_condition)
return False, fail_msg, trace
if 'd_boundary_limit' in request.args:
d_boundary_limit = request.args.get('d_boundary_limit', '0')
else:
logger.error('no d_boundary_limit argument passed')
fail_msg = 'error :: no d_boundary_limit argument passed'
return False, fail_msg, trace
try:
test_d_boundary_limit = float(d_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d_boundary_limit is not a valid number'
return False, fail_msg, trace
if 'd_boundary_times' in request.args:
d_boundary_times = request.args.get('d_boundary_times', '1')
else:
logger.error('no d_boundary_times argument passed')
fail_msg = 'error :: no d_boundary_times argument passed'
return False, fail_msg, trace
try:
test_d_boundary_times = int(d_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d_boundary_times is not an int'
return False, fail_msg, trace
# @added 20170616 - Feature #2048: D1 ionosphere layer
d1_condition = None
if 'd1_condition' in request.args:
d1_condition = request.args.get('d1_condition', 'none')
else:
logger.error('no d1_condition argument passed')
fail_msg = 'error :: no d1_condition argument passed'
return False, fail_msg, trace
if str(d1_condition) == 'none':
d1_condition = None
else:
if not str(d1_condition) in conditions:
logger.error('d1_condition not a valid condition - %s' % str(d1_condition))
fail_msg = 'error :: d1_condition not a valid condition - %s' % str(d1_condition)
return False, fail_msg, trace
if 'd1_boundary_limit' in request.args:
d1_boundary_limit = request.args.get('d1_boundary_limit', '0')
else:
logger.error('no d1_boundary_limit argument passed')
fail_msg = 'error :: no d1_boundary_limit argument passed'
return False, fail_msg, trace
try:
test_d1_boundary_limit = float(d1_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d1_boundary_limit is not a valid number'
return False, fail_msg, trace
if 'd1_boundary_times' in request.args:
d1_boundary_times = request.args.get('d1_boundary_times', '1')
else:
logger.error('no d1_boundary_times argument passed')
fail_msg = 'error :: no d1_boundary_times argument passed'
return False, fail_msg, trace
try:
test_d1_boundary_times = int(d1_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: d1_boundary_times is not an int'
return False, fail_msg, trace
if 'e_condition' in request.args:
e_condition = request.args.get('e_condition', None)
else:
logger.error('no e_condition argument passed')
fail_msg = 'error :: no e_condition argument passed'
return False, fail_msg, trace
if not str(e_condition) in value_conditions:
logger.error('e_condition not a valid value condition - %s' % str(e_condition))
fail_msg = 'error :: e_condition not a valid value condition - %s' % str(e_condition)
return False, fail_msg, trace
if 'e_boundary_limit' in request.args:
e_boundary_limit = request.args.get('e_boundary_limit')
else:
logger.error('no e_boundary_limit argument passed')
fail_msg = 'error :: no e_boundary_limit argument passed'
return False, fail_msg, trace
try:
test_e_boundary_limit = float(e_boundary_limit) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: e_boundary_limit is not a valid number'
return False, fail_msg, trace
if 'e_boundary_times' in request.args:
e_boundary_times = request.args.get('e_boundary_times')
else:
logger.error('no e_boundary_times argument passed')
fail_msg = 'error :: no e_boundary_times argument passed'
return False, fail_msg, trace
try:
test_e_boundary_times = int(e_boundary_times) + 1
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: e_boundary_times is not an int'
return False, fail_msg, trace
# NOT IMPLEMENTED YET
es_layer = False
f1_layer = False
f2_layer = False
update_label = False
if 'fp_layer_label' in request.args:
label_arg = request.args.get('fp_layer_label')
update_label = label_arg[:255]
engine_needed = True
engine = None
ionosphere_layers_table = None
layers_algorithms_table = None
if engine_needed:
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_layers_table meta for layers_id %s' % (str(layers_id))
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
try:
layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get layers_algorithms_table meta for layers_id %s' % (str(layers_id))
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if update_label:
# Update layers_id label
try:
connection = engine.connect()
connection.execute(
ionosphere_layers_table.update(
ionosphere_layers_table.c.id == layers_id).
values(label=update_label))
connection.close()
logger.info('updated label for %s - %s' % (str(layers_id), str(update_label)))
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not update label for layers_id %s ' % str(layers_id))
fail_msg = 'error :: could not update label for layers_id %s ' % str(layers_id)
if engine:
engine_disposal(engine)
raise
layers_algorithms = []
try:
connection = engine.connect()
stmt = select([layers_algorithms_table]).where(layers_algorithms_table.c.layer_id == layers_id)
result = connection.execute(stmt)
connection.close()
for row in result:
la_id = row['id']
la_layer = str(row['layer'])
layers_algorithms.append([la_id, la_layer])
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get layers_algorithms for layer id %s from layers_algorithms DB table' % str(layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
# Remake D and E layers as defined by arguments
for algorithm_id, layer_name in layers_algorithms:
# D layer
if layer_name == 'D':
try:
connection = engine.connect()
connection.execute(
layers_algorithms_table.update(
layers_algorithms_table.c.id == algorithm_id).values(
condition=d_condition, layer_boundary=d_boundary_limit,
times_in_row=d_boundary_times))
connection.close()
logger.info('updated D layer for %s - %s, %s, %s' % (
str(layers_id), str(d_condition), str(d_boundary_limit),
str(d_boundary_times)))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to update D layer record into the layers_algorithms table for %s' % str(layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# @added 20170616 - Feature #2048: D1 ionosphere layer
if d1_condition and layer_name == 'D1':
try:
connection = engine.connect()
connection.execute(
layers_algorithms_table.update(
layers_algorithms_table.c.id == algorithm_id).values(
condition=d1_condition, layer_boundary=d1_boundary_limit,
times_in_row=d1_boundary_times))
connection.close()
logger.info('updated D1 layer for %s - %s, %s, %s' % (
str(layers_id), str(d1_condition), str(d1_boundary_limit),
str(d1_boundary_times)))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to update D1 layer record into the layers_algorithms table for %s' % str(layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
# E layer
if layer_name == 'E':
try:
connection = engine.connect()
connection.execute(
layers_algorithms_table.update(
layers_algorithms_table.c.id == algorithm_id).values(
condition=e_condition, layer_boundary=e_boundary_limit,
times_in_row=e_boundary_times))
connection.close()
logger.info('updated E layer for %s - %s, %s, %s' % (
str(layers_id), str(e_condition), str(e_boundary_limit),
str(e_boundary_times)))
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: failed to update E layer record into the layers_algorithms table for %s' % str(layers_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise
if engine:
engine_disposal(engine)
return True, fail_msg, trace
# @added 20170402 - Feature #2000: Ionosphere - validated
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Extended the validate_fp function to validate a single fp id or all the unvalidated,
# enabled features profiles for a metric_id
# def validate_fp(fp_id):
def validate_fp(update_id, id_column_name):
"""
Validate a single features profile or validate all enabled, unvalidated
features profiles for a metric_id.
:param update_id: the features profile id or metric_id to validate
:type update_id: int
:param id_column_name: the column name to select where on, e.g. id or metric_id
:type id_column_name: str
:return: tuple
:rtype: (boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: validate_fp'
# @added 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
fp_id = update_id
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
if id_column_name == 'id':
logger.info('%s validating fp_id %s' % (function_str, str(fp_id)))
if id_column_name == 'metric_id':
logger.info('%s validating all enabled and unvalidated features profiles for metric_id - %s' % (function_str, str(update_id)))
trace = 'none'
fail_msg = 'none'
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_table meta for fp_id %s' % (str(fp_id))
if id_column_name == 'id':
fail_msg = 'error :: ionosphere_backend :: %s :: failed to get ionosphere_table meta for fp_id %s' % (function_str, str(fp_id))
if id_column_name == 'metric_id':
fail_msg = 'error :: ionosphere_backend :: %s :: failed to get ionosphere_table meta for metric_id - %s' % (function_str, str(update_id))
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
try:
connection = engine.connect()
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_table meta for fp_id %s' % (str(fp_id))
if id_column_name == 'id':
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == int(fp_id)).
values(validated=1))
if id_column_name == 'metric_id':
stmt = ionosphere_table.update().\
values(validated=1).\
where(ionosphere_table.c.metric_id == int(update_id)).\
where(ionosphere_table.c.validated == 0).\
where(ionosphere_table.c.enabled == 1)
connection.execute(stmt)
connection.close()
if id_column_name == 'id':
logger.info('updated validated for %s' % (str(fp_id)))
if id_column_name == 'metric_id':
logger.info('updated validated for all enabled, unvalidated features profiles for metric_id - %s' % (str(update_id)))
except:
trace = traceback.format_exc()
logger.error(trace)
if id_column_name == 'id':
logger.error('error :: could not update validated for fp_id %s ' % str(fp_id))
fail_msg = 'error :: could not update validated label for fp_id %s ' % str(fp_id)
if id_column_name == 'metric_id':
logger.error('error :: could not update validated for all enabled, unvalidated features profiles for metric_id - %s ' % str(update_id))
fail_msg = 'error :: could not update validated labels for all enabled, unvalidated features profiles for metric_id - %s ' % str(update_id)
if engine:
engine_disposal(engine)
raise
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal
if engine:
engine_disposal(engine)
if id_column_name == 'id':
return True, fail_msg, trace
if id_column_name == 'metric_id':
return True, fail_msg, trace
# @added 20170617 - Feature #2054: ionosphere.save.training_data
def save_training_data_dir(timestamp, base_name, label, hdate):
"""
Save training_data and return details or just return details if exists
:param timestamp: the Ionosphere training_data metric timestamp
:param base_name: metric base_name
:param label: the saved training_data label
:param hdate: human date for the saved training_data
:type timestamp: str
:type base_name: str
:type label: str
:type hdate: str
:return: saved_successful, details, fail_msg, trace
:rtype: boolean, list, str, str
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: save_training_data_dir'
trace = 'none'
fail_msg = 'none'
training_data_saved = True
logger.info(
'%s :: Saving training_data for %s.%s' % (
function_str, (timestamp), str(base_name)))
metric_timeseries_dir = base_name.replace('.', '/')
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(timestamp),
metric_timeseries_dir)
saved_metric_training_data_dir = '%s_saved/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(timestamp),
metric_timeseries_dir)
details_file = '%s/%s.%s.saved_training_data_label.txt' % (saved_metric_training_data_dir, str(timestamp), base_name)
if path.isfile(details_file):
logger.info(
'%s :: Saved training_data for %s.%s already exists' % (
function_str, (timestamp), str(base_name)))
saved_training_data_details = []
try:
with open(details_file) as f:
for line in f:
saved_training_data_details.append(line)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = '%s :: error :: failed to read details file %s' % (function_str, details_file)
logger.error('%s' % fail_msg)
raise
return True, saved_training_data_details, fail_msg, trace
if not path.exists(saved_metric_training_data_dir):
try:
mkdir_p(saved_metric_training_data_dir)
logger.info(
'%s :: created %s' % (function_str, saved_metric_training_data_dir))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = '%s :: error :: failed to create %s' % (function_str, saved_metric_training_data_dir)
logger.error('%s' % fail_msg)
training_data_saved = False
if training_data_saved:
save_data_files = []
try:
glob_path = '%s/*.*' % metric_training_data_dir
save_data_files = glob.glob(glob_path)
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error(
'%s :: error :: glob %s - training data not copied to %s' % (
function_str, metric_training_data_dir, saved_metric_training_data_dir))
fail_msg = 'error :: glob failed - training data not copied'
logger.error('%s' % fail_msg)
training_data_saved = False
if not training_data_saved:
raise
for i_file in save_data_files:
try:
shutil.copy(i_file, saved_metric_training_data_dir)
logger.info(
'%s :: training data copied to %s/%s' % (
function_str, saved_metric_training_data_dir, i_file))
except shutil.Error as e:
trace = traceback.format_exc()
logger.error('%s' % trace)
logger.error(
'%s :: error :: shutil error - %s - not copied to %s' % (
function_str, i_file, saved_metric_training_data_dir))
logger.error('%s :: error :: %s' % (function_str, e))
training_data_saved = False
fail_msg = 'error :: shutil error'
# Any error saying that the directory doesn't exist
except OSError as e:
trace = traceback.format_exc()
logger.error('%s' % trace)
logger.error(
'%s :: error :: OSError error %s - training data not copied to %s' % (
function_str, metric_training_data_dir, saved_metric_training_data_dir))
logger.error(
'%s :: error :: %s' % (function_str, e))
training_data_saved = False
fail_msg = 'error :: shutil error'
if not training_data_saved:
raise
# Create a label file
try:
saved_training_data_details = '[[label: \'%s\'], [saved_date: \'%s\']]' % (str(label), str(hdate))
write_data_to_file(skyline_app, details_file, 'w', saved_training_data_details)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = '%s :: error :: failed to write label file' % (function_str)
logger.error('%s' % fail_msg)
return True, False, fail_msg, trace
# @added 20170908 - Feature #2056: ionosphere - disabled_features_profiles
def features_profile_family_tree(fp_id):
"""
Returns all the features profile ids of the related progeny features
profiles, the whole family tree.
:param fp_id: the features profile id
:return: array
:rtype: array
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: features_profile_family_tree'
logger.info('%s getting the features profile ids of the progeny of fp_id %s' % (function_str, str(fp_id)))
trace = 'none'
fail_msg = 'none'
current_fp_id = int(fp_id)
family_tree_fp_ids = [current_fp_id]
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_table meta for fp_id %s' % (str(fp_id))
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
row = current_fp_id
while row:
try:
connection = engine.connect()
stmt = select([ionosphere_table]).where(ionosphere_table.c.parent_id == current_fp_id)
result = connection.execute(stmt)
connection.close()
row = None
for row in result:
progeny_id = row['id']
family_tree_fp_ids.append(int(progeny_id))
current_fp_id = progeny_id
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get id for %s' % str(current_fp_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
return family_tree_fp_ids, fail_msg, trace
# @added 20170908 - Feature #2056: ionosphere - disabled_features_profiles
def disable_features_profile_family_tree(fp_ids):
"""
Disable a features profile and all related progeny features profiles
:param fp_ids: a list of the the features profile ids to disable
:return: array
:rtype: array
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: disable_features_profile_family_tree'
logger.info('%s disabling fp ids - %s' % (function_str, str(fp_ids)))
trace = 'none'
fail_msg = 'none'
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
raise
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
raise
try:
ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_table meta for disable_features_profile_family_tree'
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
raise # to webapp to return in the UI
for fp_id in fp_ids:
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == int(fp_id)).
values(enabled=0))
connection.close()
logger.info('updated enabled for %s to 0' % (str(fp_id)))
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not update enabled for fp_id %s ' % str(fp_id))
fail_msg = 'error :: could not update enabled for fp_id %s ' % str(fp_id)
if engine:
engine_disposal(engine)
raise
if engine:
engine_disposal(engine)
return True, fail_msg, trace
# @added 20170915 - Feature #1996: Ionosphere - matches page
def get_fp_matches(metric, metric_like, get_fp_id, get_layer_id, from_timestamp, until_timestamp, limit, sort):
"""
Get all the matches.
:param metric: all or the metric name
:param metric_like: False or the metric MySQL like string e.g statsd.%
:param get_fp_id: None or int
:param get_layer_id: None or int
:param from_timestamp: timestamp or None
:param until_timestamp: timestamp or None
:param limit: None or number to limit to
:param sort: DESC or ASC
:return: list
:rtype: list
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: get_fp_matches'
logger.info('%s getting matches' % (function_str))
logger.info('arguments :: %s, %s, %s, %s, %s, %s, %s, %s' % (
str(metric), str(metric_like), str(get_fp_id), str(get_layer_id),
str(from_timestamp), str(until_timestamp), str(limit),
str(sort)))
trace = 'none'
fail_msg = 'none'
if settings.MEMCACHE_ENABLED:
memcache_client = pymemcache_Client((settings.MEMCACHED_SERVER_IP, settings.MEMCACHED_SERVER_PORT), connect_timeout=0.1, timeout=0.2)
else:
memcache_client = None
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
return False, fail_msg, trace
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
return False, fail_msg, trace
query_string = 'SELECT * FROM ionosphere_matched'
needs_and = False
if metric and metric != 'all':
metric_id_stmt = 'SELECT id FROM metrics WHERE metric=\'%s\'' % str(metric)
metric_id = None
logger.info('metric set to %s' % str(metric))
try:
connection = engine.connect()
result = connection.execute(metric_id_stmt)
connection.close()
for row in result:
if not metric_id:
metric_id = int(row[0])
logger.info('metric_id set to %s' % str(metric_id))
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine id from metrics table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
fp_ids_stmt = 'SELECT id FROM ionosphere WHERE metric_id=%s' % str(metric_id)
fp_ids = ''
try:
connection = engine.connect()
results = connection.execute(fp_ids_stmt)
connection.close()
for row in results:
fp_id = str(row[0])
if fp_ids == '':
fp_ids = '%s' % (fp_id)
else:
new_fp_ids = '%s, %s' % (fp_ids, fp_id)
fp_ids = new_fp_ids
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine fp ids from the ionosphere table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
logger.info('fp_ids set to %s' % str(fp_ids))
query_string = 'SELECT * FROM ionosphere_matched WHERE fp_id in (%s)' % str(fp_ids)
needs_and = True
# if 'metric_like' in request.args:
if metric_like:
if metric_like and metric_like != 'all':
# SQLAlchemy requires the MySQL wildcard % to be %% to prevent
# interpreting the % as a printf-like format character
python_escaped_metric_like = metric_like.replace('%', '%%')
# nosec to exclude from bandit tests
metrics_like_query = 'SELECT id FROM metrics WHERE metric LIKE \'%s\'' % (str(python_escaped_metric_like)) # nosec
logger.info('executing metrics_like_query - %s' % metrics_like_query)
metric_ids = ''
try:
connection = engine.connect()
results = connection.execute(metrics_like_query)
connection.close()
for row in results:
metric_id = str(row[0])
if metric_ids == '':
metric_ids = '%s' % (metric_id)
else:
new_metric_ids = '%s, %s' % (metric_ids, metric_id)
metric_ids = new_metric_ids
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine ids from metrics table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
fp_ids_stmt = 'SELECT id FROM ionosphere WHERE metric_id IN (%s)' % str(metric_ids)
fp_ids = ''
try:
connection = engine.connect()
results = connection.execute(fp_ids_stmt)
connection.close()
for row in results:
fp_id = str(row[0])
if fp_ids == '':
fp_ids = '%s' % (fp_id)
else:
new_fp_ids = '%s, %s' % (fp_ids, fp_id)
fp_ids = new_fp_ids
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine fp ids from the ionosphere table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
query_string = 'SELECT * FROM ionosphere_matched WHERE fp_id in (%s)' % str(fp_ids)
needs_and = True
# @added 20170917 - Feature #1996: Ionosphere - matches page
# Added by fp_id or layer_id as well
get_features_profiles_matched = True
get_layers_matched = True
if get_fp_id or get_layer_id:
if get_fp_id:
logger.info('get_fp_id set to %s' % str(get_fp_id))
if get_fp_id != '0':
get_layers_matched = False
query_string = 'SELECT * FROM ionosphere_matched WHERE fp_id=%s' % str(get_fp_id)
if get_layer_id:
logger.info('get_layer_id set to %s' % str(get_layer_id))
if get_layer_id != '0':
get_features_profiles_matched = False
query_string = 'SELECT * FROM ionosphere_layers_matched WHERE layer_id=%s' % str(get_layer_id)
fp_id_query_string = 'SELECT fp_id FROM ionosphere_layers WHERE id=%s' % str(get_layer_id)
fp_id = None
try:
connection = engine.connect()
result = connection.execute(fp_id_query_string)
connection.close()
for row in result:
if not fp_id:
fp_id = int(row[0])
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine fp_id from the ionosphere_layers table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
needs_and = True
if 'from_timestamp' in request.args:
from_timestamp = request.args.get('from_timestamp', None)
if from_timestamp and from_timestamp != 'all':
if ":" in from_timestamp:
import datetime
new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
from_timestamp = str(int(new_from_timestamp))
if needs_and:
new_query_string = '%s AND metric_timestamp >= %s' % (query_string, from_timestamp)
query_string = new_query_string
needs_and = True
else:
new_query_string = '%s WHERE metric_timestamp >= %s' % (query_string, from_timestamp)
query_string = new_query_string
needs_and = True
if 'until_timestamp' in request.args:
until_timestamp = request.args.get('until_timestamp', None)
if until_timestamp and until_timestamp != 'all':
if ":" in until_timestamp:
import datetime
new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
until_timestamp = str(int(new_until_timestamp))
if needs_and:
new_query_string = '%s AND metric_timestamp <= %s' % (query_string, until_timestamp)
query_string = new_query_string
needs_and = True
else:
new_query_string = '%s WHERE metric_timestamp <= %s' % (query_string, until_timestamp)
query_string = new_query_string
needs_and = True
ordered_by = None
if 'order' in request.args:
order = request.args.get('order', 'DESC')
if str(order) == 'DESC':
ordered_by = 'DESC'
if str(order) == 'ASC':
ordered_by = 'ASC'
if ordered_by:
new_query_string = '%s ORDER BY id %s' % (query_string, ordered_by)
query_string = new_query_string
if 'limit' in request.args:
limit = request.args.get('limit', '30')
try:
test_limit = int(limit) + 0
if int(limit) != 0:
new_query_string = '%s LIMIT %s' % (query_string, str(limit))
query_string = new_query_string
except:
logger.error('error :: limit is not an integer - %s' % str(limit))
# Get ionosphere_summary memcache object from which metric names will be
# determined
memcache_result = None
ionosphere_summary_list = None
if settings.MEMCACHE_ENABLED:
try:
memcache_result = memcache_client.get('ionosphere_summary_list')
except:
logger.error('error :: failed to get ionosphere_summary_list from memcache')
try:
memcache_client.close()
# Added nosec to exclude from bandit tests
except: # nosec
pass
if memcache_result:
try:
logger.info('using memcache ionosphere_summary_list key data')
ionosphere_summary_list = literal_eval(memcache_result)
except:
logger.error('error :: failed to process data from memcache key - ionosphere_summary_list')
ionosphere_summary_list = False
if not ionosphere_summary_list:
stmt = "SELECT ionosphere.id, ionosphere.metric_id, metrics.metric FROM ionosphere INNER JOIN metrics ON ionosphere.metric_id=metrics.id"
try:
connection = engine.connect()
results = connection.execute(stmt)
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine metrics from metrics table')
# Disposal and raise for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
if results:
# Because each row in the results is a dict and all the rows are
# being used, they are converted into a list and stored in
# memcache as a list
ionosphere_summary_list = []
for row in results:
ionosphere_summary_list.append([int(row['id']), int(row['metric_id']), str(row['metric'])])
if settings.MEMCACHE_ENABLED:
try:
memcache_client.set('ionosphere_summary_list', ionosphere_summary_list, expire=600)
logger.info('set memcache ionosphere_summary_list key with DB results')
except:
logger.error('error :: failed to get ionosphere_summary_list from memcache')
try:
memcache_client.close()
# Added nosec to exclude from bandit tests
except: # nosec
pass
# ionosphere_matched table layout
# | id | fp_id | metric_timestamp | all_calc_features_sum | all_calc_features_count | sum_common_values | common_features_count | tsfresh_version |
# | 39793 | 782 | 1505560867 | 9856.36758282061 | 210 | 9813.63277426169 | 150 | 0.4.0 |
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling
# | id | fp_id | metric_timestamp | all_calc_features_sum | all_calc_features_count | sum_common_values | common_features_count | tsfresh_version | minmax | minmax_fp_features_sum | minmax_fp_features_count | minmax_anomalous_features_sum | minmax_anomalous_features_count |
# | 68071 | 3352 | 1529490602 | 383311386.647846 | 210 | 383283135.786868 | 150 | 0.4.0 | 1 | 4085.7427786846 | 210 | 4048.14642205812 | 210 |
# ionosphere_layers_matched table layout
# | id | layer_id | fp_id | metric_id | anomaly_timestamp | anomalous_datapoint | full_duration |
# | 25069 | 24 | 1108 | 195 | 1505561823 | 2.000000 | 86400 |
matches = []
# matches list elements - where id is the ionosphere_matched or the
# ionosphere_layers_matched table id for the match being processed
# [metric_timestamp, id, matched_by, fp_id, layer_id, metric, uri_to_matched_page]
# e.g.
# [[1505560867, 39793, 'features_profile', 782, 'None', 'stats.skyline-dev-3-40g-gra1.vda.ioInProgress', 'ionosphere?fp_matched=true...'],
# [1505561823, 25069, 'layers', 1108, 24, 'stats.controller-dev-3-40g-sbg1.apache.sending', 'ionosphere?fp_matched=true...']]
if get_features_profiles_matched:
try:
connection = engine.connect()
stmt = query_string
logger.info('executing %s' % stmt)
results = connection.execute(stmt)
connection.close()
except:
trace = traceback.format_exc()
logger.error(traceback.format_exc())
logger.error('error :: could not get matches from the ionosphere_matched table')
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal and raise
if engine:
engine_disposal(engine)
return False, fail_msg, trace
for row in results:
metric_timestamp = int(row['metric_timestamp'])
metric_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(metric_timestamp)))
match_id = int(row['id'])
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling
# matched_by = 'features profile'
minmax = int(row['minmax'])
if minmax == 0:
matched_by = 'features profile'
else:
matched_by = 'features profile - minmax'
fp_id = int(row['fp_id'])
layer_id = 'None'
# Get metric name, first get metric id from the features profile
# record
try:
metric_list = [row[2] for row in ionosphere_summary_list if row[0] == fp_id]
metric = metric_list[0]
except:
metric = 'UNKNOWN'
uri_to_matched_page = 'None'
matches.append([metric_human_date, match_id, matched_by, fp_id, layer_id, metric, uri_to_matched_page])
if get_layers_matched:
# layers matches
new_query_string = query_string.replace('ionosphere_matched', 'ionosphere_layers_matched')
query_string = new_query_string
new_query_string = query_string.replace('metric_timestamp', 'anomaly_timestamp')
query_string = new_query_string
try:
connection = engine.connect()
stmt = query_string
logger.info('executing %s' % stmt)
results = connection.execute(stmt)
connection.close()
except:
trace = traceback.format_exc()
logger.error(traceback.format_exc())
logger.error('error :: could not get matches from the ionosphere_layers_matched table')
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal and raise
if engine:
engine_disposal(engine)
return False, fail_msg, trace
for row in results:
anomaly_timestamp = int(row['anomaly_timestamp'])
metric_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(anomaly_timestamp)))
match_id = int(row['id'])
# @modified 20180921 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# matched_by = 'layers'
try:
approx_close = int(row['approx_close'])
except:
approx_close = 0
if approx_close == 0:
matched_by = 'layers'
else:
matched_by = 'layers - approx_close'
fp_id = int(row['fp_id'])
layer_id = int(row['layer_id'])
# Get metric name, first get metric id from the features profile
# record
try:
metric_list = [row[2] for row in ionosphere_summary_list if row[0] == fp_id]
metric = metric_list[0]
except:
metric = 'UNKNOWN'
uri_to_matched_page = 'None'
matches.append([metric_human_date, match_id, matched_by, fp_id, layer_id, metric, uri_to_matched_page])
sorted_matches = sorted(matches, key=lambda x: x[0])
matches = sorted_matches
if engine:
engine_disposal(engine)
try:
del metric_list
except:
logger.error('error :: failed to del metric_list')
# @added 20180809 - Bug #2496: error reported on no matches found
# https://github.com/earthgecko/skyline/issues/64
# If there are no matches return this information in matches to prevent
# webapp from reporting an error
if not matches:
# [[1505560867, 39793, 'features_profile', 782, 'None', 'stats.skyline-dev-3-40g-gra1.vda.ioInProgress', 'ionosphere?fp_matched=true...'],
# @modified 20180921 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# matches = [['None', 'None', 'no matches were found', 'None', 'None', 'no matches were found', 'None']]
matches = [['None', 'None', 'no matches were found', 'None', 'None', 'no matches were found', 'None', 'None']]
return matches, fail_msg, trace
# @added 20170917 - Feature #1996: Ionosphere - matches page
def get_matched_id_resources(matched_id, matched_by, metric, requested_timestamp):
"""
Get the Ionosphere matched details of a features profile or layer
:param matched_id: the matched id
:type matched_id: int
:param matched_by: either features_profile or layers
:type matched_by: str
:param metric: metric base_name
:type metric: str
:param requested_timestamp: the timestamp of the features profile
:type requested_timestamp: int
:return: tuple
:rtype: (str, boolean, str, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: get_matched_id_resources'
trace = 'none'
fail_msg = 'none'
matched_details = None
use_table = 'ionosphere_matched'
if matched_by == 'layers':
use_table = 'ionosphere_layers_matched'
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if matched_by == 'features_profile':
ionosphere_matched_table = None
try:
ionosphere_matched_table, fail_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
if matched_by == 'layers':
ionosphere_layers_matched_table = None
try:
ionosphere_layers_matched_table, fail_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
if trace != 'none':
fail_msg = 'error :: failed to get %s table for matched id %s' % (use_table, str(matched_id))
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: %s table OK' % (function_str, use_table))
if matched_by == 'features_profile':
stmt = select([ionosphere_matched_table]).where(ionosphere_matched_table.c.id == int(matched_id))
if matched_by == 'layers':
stmt = select([ionosphere_layers_matched_table]).where(ionosphere_layers_matched_table.c.id == int(matched_id))
try:
connection = engine.connect()
# stmt = select([ionosphere_matched_table]).where(ionosphere_matched_table.c.id == int(matched_id))
result = connection.execute(stmt)
row = result.fetchone()
matched_details_object = row
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get matched_id %s details from %s DB table' % (str(matched_id), use_table)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if matched_by == 'features_profile':
try:
fp_id = row['fp_id']
metric_timestamp = row['metric_timestamp']
all_calc_features_sum = row['all_calc_features_sum']
all_calc_features_count = row['all_calc_features_count']
sum_common_values = row['sum_common_values']
common_features_count = row['common_features_count']
tsfresh_version = row['tsfresh_version']
matched_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(metric_timestamp)))
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling
minmax = int(row['minmax'])
minmax_fp_features_sum = row['minmax_fp_features_sum']
minmax_fp_features_count = row['minmax_fp_features_count']
minmax_anomalous_features_sum = row['minmax_anomalous_features_sum']
minmax_anomalous_features_count = row['minmax_anomalous_features_count']
matched_details = '''
tsfresh_version :: %s
all_calc_features_sum :: %s | all_calc_features_count :: %s
sum_common_values :: %s | common_features_count :: %s
metric_timestamp :: %s | human_date :: %s
minmax_scaled :: %s
minmax_fp_features_sum :: %s | minmax_fp_features_count :: %s
minmax_anomalous_features_sum :: %s | minmax_anomalous_features_count :: %s
''' % (str(tsfresh_version), str(all_calc_features_sum),
str(all_calc_features_count), str(sum_common_values),
str(common_features_count), str(metric_timestamp),
str(matched_human_date), str(minmax),
str(minmax_fp_features_sum), str(minmax_fp_features_count),
str(minmax_anomalous_features_sum),
str(minmax_anomalous_features_count))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get details for matched id %s' % str(matched_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
full_duration_stmt = 'SELECT full_duration FROM ionosphere WHERE id=%s' % str(fp_id)
full_duration = None
try:
connection = engine.connect()
result = connection.execute(full_duration_stmt)
connection.close()
for row in result:
if not full_duration:
full_duration = int(row[0])
logger.info('full_duration for matched determined as %s' % (str(full_duration)))
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine full_duration from ionosphere table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
if matched_by == 'layers':
try:
layer_id = row['layer_id']
fp_id = row['fp_id']
metric_timestamp = row['anomaly_timestamp']
anomalous_datapoint = row['anomalous_datapoint']
full_duration = row['full_duration']
matched_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(metric_timestamp)))
matched_details = '''
layer_id :: %s
anomalous_datapoint :: %s
full_duration :: %s
metric_timestamp :: %s | human_date :: %s
''' % (str(layer_id), str(anomalous_datapoint), str(full_duration),
str(metric_timestamp), str(matched_human_date))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get details for matched id %s' % str(matched_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
# Create a Graphite image
from_timestamp = str(int(metric_timestamp) - int(full_duration))
until_timestamp = str(metric_timestamp)
timeseries_dir = metric.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, timeseries_dir,
str(requested_timestamp))
if matched_by == 'features_profile':
graph_image_file = '%s/%s.matched.fp_id-%s.%s.png' % (
metric_data_dir, metric, str(fp_id), str(metric_timestamp))
if matched_by == 'layers':
graph_image_file = '%s/%s.layers_id-%s.matched.layers.fp_id-%s.%s.png' % (
metric_data_dir, metric, str(matched_id),
str(fp_id), str(layer_id))
if not path.isfile(graph_image_file):
logger.info('getting Graphite graph for match - from_timestamp - %s, until_timestamp - %s' % (str(from_timestamp), str(until_timestamp)))
graph_image = get_graphite_metric(
skyline_app, metric, from_timestamp, until_timestamp, 'image',
graph_image_file)
if not graph_image:
logger.error('failed getting Graphite graph')
graph_image_file = None
return matched_details, True, fail_msg, trace, matched_details_object, graph_image_file
# @added 20180812 - Feature #2430: Ionosphere validate learnt features profiles page
def get_features_profiles_to_validate(base_name):
"""
Get the details for the Ionosphere features profiles that need to be
validated for a metric and return a list of the details for each features
profile, including the ionosphere_image API URIs for all the relevant graph
images for the webapp Ionosphere validate_features_profiles page.
[[ fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp,
fp_parent_id, parent_full_duration, parent_anomaly_timestamp, fp_date,
fp_graph_uri, parent_fp_date, parent_fp_graph_uri, parent_parent_fp_id,
fp_learn_graph_uri, parent_fp_learn_graph_uri, minimum_full_duration,
maximum_full_duration]]
:param base_name: metric base_name
:type base_name: str
:return: list of lists
:rtype: [[int, int, str, int, int, int, int, int, str, str, str, str, int, str, str, int, int]]
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: get_features_profiles_to_validate'
trace = 'none'
fail_msg = 'none'
# Query the ionosphere_functions function for base_name, validated == false
# and get the details for each features profile that needs to be validated
features_profiles_to_validate = []
search_success = False
fps = []
try:
fps, fps_count, mc, cc, gc, full_duration_list, enabled_list, tsfresh_version_list, generation_list, search_success, fail_msg, trace = ionosphere_search(False, True)
logger.info('fp object :: %s' % str(fps))
except:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: error with search_ionosphere' % function_str
logger.error(fail_msg)
return (features_profiles_to_validate, fail_msg, trace)
if not search_success:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: Webapp error with search_ionosphere' % function_str
logger.error(fail_msg)
return (features_profiles_to_validate, fail_msg, trace)
# Determine the minimum and maximum full durations from the returned fps so
# this can be used later to determine what class of features profile is
# being dealt with in terms of whether the features profile is a
# full_duration LEARNT features profile or a settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS
# LEARNT features profile. This allows for determining the correct other
# resolution ionosphere_image URIs which are interpolated for display in the
# HTML table on the validate_features_profiles page.
minimum_full_duration = None
maximum_full_duration = None
# [fp_id, metric_id, metric, full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label]
# [4029, 157, 'stats.skyline-dev-3.vda1.ioTime', 604800, 1534001973, '0.4.0', 0.841248, 210, 70108436036.9, 0, 0, 'never matched', '2018-08-11 16:41:04', 0, 'never checked', 3865, 6, 0, 0]
# for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id in fps:
for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label in fps:
if not minimum_full_duration:
minimum_full_duration = int(fp_full_duration)
else:
if int(fp_full_duration) < int(minimum_full_duration):
minimum_full_duration = int(fp_full_duration)
if not maximum_full_duration:
maximum_full_duration = int(fp_full_duration)
else:
if int(fp_full_duration) > int(maximum_full_duration):
maximum_full_duration = int(fp_full_duration)
# Get the features profile parent details (or the parent's parent if
# needed) to determine the correct arguments for the ionosphere_image URIs
# for the graph images of the parent, from which the fp being evaluated
# was learnt, for side-by-side visual comparison to inform the user and
# allow them to validate the features profile.
# [fp_id, metric_id, metric, full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label]
# for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id in fps:
for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label in fps:
if int(fp_parent_id) == 0:
continue
if int(fp_validated) == 1:
continue
# @added 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
if fp_id not in enabled_list:
continue
parent_fp_details_object = None
parent_parent_fp_id = None
try:
parent_fp_details, success, fail_msg, trace, parent_fp_details_object = features_profile_details(fp_parent_id)
except:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: failed to get parent_fp_details_object from features_profile_details for parent fp_id %s' % (
function_str, str(fp_parent_id))
logger.error(fail_msg)
return (features_profiles_to_validate, fail_msg, trace)
if not parent_fp_details_object:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: no parent_fp_details_object from features_profile_details for parent fp_id %s' % (
function_str, str(fp_parent_id))
logger.error(fail_msg)
return (features_profiles_to_validate, fail_msg, trace)
parent_full_duration = parent_fp_details_object['full_duration']
# If the features profile is learnt at a full_duration of
# settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS (aka
# maximum_full_duration), the graphs of the parent's parent fp id are
# required. This is because a features profile that is LEARNT in the
# learn full duration in days context will essentially have the same
# graphs as its parent. Therefore the graphs of the parent's parent
# are required to allow for the side-by-side visual comparison.
get_parent_parent = False
if int(fp_full_duration) > int(minimum_full_duration):
get_parent_parent = True
try:
parent_parent_fp_id = parent_fp_details_object['parent_id']
except:
parent_parent_fp_id = 0
if int(parent_parent_fp_id) == 0:
get_parent_parent = False
parent_parent_fp_details_object = None
if get_parent_parent:
try:
parent_parent_fp_id = parent_fp_details_object['parent_id']
parent_parent_fp_details, success, fail_msg, trace, parent_parent_fp_details_object = features_profile_details(parent_parent_fp_id)
parent_parent_full_duration = parent_parent_fp_details_object['full_duration']
parent_parent_anomaly_timestamp = parent_parent_fp_details_object['anomaly_timestamp']
except:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: failed to get parent_parent_fp_details_object from features_profile_details for parent parent fp_id %s' % (
function_str, str(parent_parent_fp_id))
logger.error(fail_msg)
return (features_profiles_to_validate, fail_msg, trace)
if not parent_fp_details_object:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: no parent_fp_details_object from features_profile_details for parent fp_id %s' % (
function_str, str(fp_parent_id))
logger.error(fail_msg)
return (features_profiles_to_validate, fail_msg, trace)
parent_full_duration = parent_fp_details_object['full_duration']
parent_anomaly_timestamp = parent_fp_details_object['anomaly_timestamp']
metric_timeseries_dir = base_name.replace('.', '/')
# https://skyline.example.com/ionosphere_images?image=/opt/skyline/ionosphere/features_profiles/stats/skyline-1/io/received/1526312070/stats.skyline-1.io.received.graphite_now.168h.png
# Existing image URLs are namespaced and available via the API from:
# ionosphere_images?image=/opt/skyline/ionosphere/features_profiles/stats/<base_name>/io/received/<timestamp>/<graphite_metric_namespace>.graphite_now.<full_duration_in_hours>h.png
fp_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, metric_timeseries_dir,
str(anomaly_timestamp))
full_duration_in_hours = fp_full_duration / 60 / 60
fp_date = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(int(anomaly_timestamp)))
fp_graph_uri = 'ionosphere_images?image=%s/%s.graphite_now.%sh.png' % (
str(fp_data_dir), base_name, str(int(full_duration_in_hours)))
if int(fp_full_duration) < maximum_full_duration:
fp_hours = int(maximum_full_duration / 60 / 60)
get_hours = str(fp_hours)
else:
fp_hours = int(minimum_full_duration / 60 / 60)
get_hours = str(fp_hours)
fp_learn_graph_uri = 'ionosphere_images?image=%s/%s.graphite_now.%sh.png' % (
str(fp_data_dir), base_name, get_hours)
# If this is a LEARNT features profile at settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS
# then we want to compare the graph to the parent's parent graph at
# settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS
parent_fp_date_str = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(int(parent_anomaly_timestamp)))
parent_fp_date = '%s - using parent fp id %s' % (str(parent_fp_date_str), str(int(fp_parent_id)))
if get_parent_parent and parent_parent_fp_details_object:
parent_fp_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, metric_timeseries_dir,
str(parent_parent_anomaly_timestamp))
if parent_parent_full_duration < maximum_full_duration:
parent_full_duration_in_hours = int(minimum_full_duration) / 60 / 60
else:
parent_full_duration_in_hours = int(parent_parent_full_duration) / 60 / 60
parent_parent_fp_date_str = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(int(parent_parent_anomaly_timestamp)))
parent_fp_date = '%s - using parent\'s parent fp id %s' % (str(parent_parent_fp_date_str), str(int(parent_parent_fp_id)))
else:
parent_fp_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, metric_timeseries_dir,
str(parent_anomaly_timestamp))
if parent_full_duration > fp_full_duration:
parent_full_duration_in_hours = int(fp_full_duration) / 60 / 60
else:
parent_full_duration_in_hours = int(parent_full_duration) / 60 / 60
parent_fp_graph_uri = 'ionosphere_images?image=%s/%s.graphite_now.%sh.png' % (
str(parent_fp_data_dir), base_name, str(int(parent_full_duration_in_hours)))
if int(fp_full_duration) == maximum_full_duration:
fp_hours = int(minimum_full_duration / 60 / 60)
get_hours = str(fp_hours)
else:
fp_hours = int(maximum_full_duration / 60 / 60)
get_hours = str(fp_hours)
parent_fp_learn_graph_uri = 'ionosphere_images?image=%s/%s.graphite_now.%sh.png' % (
# str(parent_fp_data_dir), base_name, str(int(parent_full_duration_in_hours)))
str(parent_fp_data_dir), base_name, get_hours)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add to features_profiles_to_validate if fp_id in enabled_list
if fp_id in enabled_list:
features_profiles_to_validate.append([fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, fp_parent_id, parent_full_duration, parent_anomaly_timestamp, fp_date, fp_graph_uri, parent_fp_date, parent_fp_graph_uri, parent_parent_fp_id, fp_learn_graph_uri, parent_fp_learn_graph_uri, minimum_full_duration, maximum_full_duration])
logger.info('%s :: features_profiles_to_validate - %s' % (
function_str, str(features_profiles_to_validate)))
return (features_profiles_to_validate, fail_msg, trace)
# @added 20180815 - Feature #2430: Ionosphere validate learnt features profiles page
def get_metrics_with_features_profiles_to_validate():
"""
Get the metrics with Ionosphere features profiles that need to be validated
and return a list of the details for each metric.
[[metric_id, metric, fps_to_validate_count]]
:return: list of lists
:rtype: [[int, str, int]]
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionosphere_backend.py :: get_metrics_with_features_profiles_to_validate'
trace = 'none'
fail_msg = 'none'
# Query the ionosphere_functions function for base_name, validated == false
# and get the details for each features profile that needs to be validated
metrics_with_features_profiles_to_validate = []
search_success = False
fps = []
try:
fps, fps_count, mc, cc, gc, full_duration_list, enabled_list, tsfresh_version_list, generation_list, search_success, fail_msg, trace = ionosphere_search(False, True)
except:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: error with search_ionosphere' % function_str
logger.error(fail_msg)
return (metrics_with_features_profiles_to_validate, fail_msg, trace)
if not search_success:
trace = traceback.format_exc()
fail_msg = 'error :: %s :: Webapp error with search_ionosphere' % function_str
logger.error(fail_msg)
return (metrics_with_features_profiles_to_validate, fail_msg, trace)
# Determine the minimum and maximum full durations from the returned fps so
# this can be used later to determine what class of features profile is
# being dealt with in terms of whether the features profile is a
# full_duration LEARNT features profile or a settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS
# LEARNT features profile. This allows for determining the correct other
# resolution ionosphere_image URIs which are interpolated for display in the
# HTML table on the validate_features_profiles page.
# [fp_id, metric_id, metric, full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label]
metric_ids_with_fps_to_validate = []
# for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id in fps:
for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label in fps:
# @added 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add to features_profiles_to_validate if fp_id in enabled_list
if fp_id not in enabled_list:
continue
if metric_id not in metric_ids_with_fps_to_validate:
metric_ids_with_fps_to_validate.append(metric_id)
for i_metric_id in metric_ids_with_fps_to_validate:
fps_to_validate_count = 0
for fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp, tsfresh_version, calc_time, features_count, features_sum, deleted, fp_matched_count, human_date, created_timestamp, fp_checked_count, checked_human_date, fp_parent_id, fp_generation, fp_validated, fp_layers_id, layer_matched_count, layer_human_date, layer_check_count, layer_checked_human_date, layer_label in fps:
if i_metric_id != metric_id:
continue
# @added 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add to features_profiles_to_validate if fp_id in enabled_list
if fp_id not in enabled_list:
continue
if fp_validated == 0:
fps_to_validate_count += 1
i_metric = metric
if fps_to_validate_count > 0:
metrics_with_features_profiles_to_validate.append([i_metric_id, i_metric, fps_to_validate_count])
logger.info('%s :: metrics with features profiles to validate - %s' % (
function_str, str(metrics_with_features_profiles_to_validate)))
return (metrics_with_features_profiles_to_validate, fail_msg, trace)
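# A minimal caller sketch (not part of the original file): the webapp view is assumed to
# consume the documented [[metric_id, metric, fps_to_validate_count]] return value.
#
#     metrics_to_validate, fail_msg, trace = get_metrics_with_features_profiles_to_validate()
#     for metric_id, metric, fps_to_validate_count in metrics_to_validate:
#         logger.info('%s has %s features profiles awaiting validation' % (metric, fps_to_validate_count))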
# @added 20181205 - Bug #2746: webapp time out - Graphs in search_features_profiles
# Feature #2602: Graphs in search_features_profiles
def ionosphere_show_graphs(requested_timestamp, data_for_metric, fp_id):
"""
Get a list of all graphs
"""
base_name = data_for_metric.replace(settings.FULL_NAMESPACE, '', 1)
log_context = 'features profile data show graphs'
logger.info('%s requested for %s at %s' % (
log_context, str(base_name), str(requested_timestamp)))
images = []
timeseries_dir = base_name.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, timeseries_dir,
str(requested_timestamp))
td_files = listdir(metric_data_dir)
for i_file in td_files:
metric_file = path.join(metric_data_dir, i_file)
if i_file.endswith('.png'):
# @modified 20170106 - Feature #1842: Ionosphere - Graphite now graphs
# Exclude any graphite_now png files from the images lists
append_image = True
if '.graphite_now.' in i_file:
append_image = False
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Exclude any matched.fp-id images
if '.matched.fp_id' in i_file:
append_image = False
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1852: Ionosphere - features_profile matched graphite graphs
# Exclude any matched.fp-id images
if '.matched.layers.fp_id' in i_file:
append_image = False
if append_image:
images.append(str(metric_file))
graphite_now_images = []
graphite_now = int(time.time())
graph_resolutions = []
graph_resolutions = [int(settings.TARGET_HOURS), 24, 168, 720]
# @modified 20170107 - Feature #1842: Ionosphere - Graphite now graphs
# Exclude if matches TARGET_HOURS - unique only
_graph_resolutions = sorted(set(graph_resolutions))
graph_resolutions = _graph_resolutions
for target_hours in graph_resolutions:
graph_image = False
try:
graph_image_file = '%s/%s.graphite_now.%sh.png' % (metric_data_dir, base_name, str(target_hours))
if path.isfile(graph_image_file):
graphite_now_images.append(graph_image_file)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Graphite graph at %s hours for %s' % (str(target_hours), base_name))
return (images, graphite_now_images)
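# Usage sketch (hypothetical metric name, timestamp and fp_id): returns the per-profile PNGs
# and the graphite_now graphs found in the profile's data directory.
#
#     images, graphite_now_images = ionosphere_show_graphs(1530000000, 'stats.www.requests', 123)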
``` |
{
"source": "2emoore4/lcr",
"score": 3
} |
#### File: 2emoore4/lcr/patternizer.py
```python
import Image
def create_white_and_black():
img = Image.new('1', (608, 684), 'white')
img.save('Images/test/white.bmp', 'BMP')
img = Image.new('1', (608, 684), 'black')
img.save('Images/test/black.bmp', 'BMP')
def create_half_and_half():
img = Image.new('1', (608, 684), 'white')
for y in xrange(img.size[1] / 2):
for x in xrange(img.size[0]):
img.putpixel((x, y), 0)
img.save('Images/test/half1.bmp')
img = Image.new('1', (608, 684), 'black')
for y in xrange(img.size[1] / 2):
for x in xrange(img.size[0]):
img.putpixel((x, y), 1)
img.save('Images/test/half2.bmp')
def create_diagonals():
img = Image.new('1', (608, 684), 'white')
for y in xrange(img.size[1] / 2):
for x in xrange(img.size[0] / 2, img.size[0]):
img.putpixel((x, y), 0)
for y in xrange(img.size[1] / 2, img.size[1]):
for x in xrange(img.size[0] / 2):
img.putpixel((x, y), 0)
img.save('Images/test/diag1.bmp')
img = Image.new('1', (608, 684), 'black')
for y in xrange(img.size[1] / 2):
for x in xrange(img.size[0] / 2, img.size[0]):
img.putpixel((x, y), 1)
for y in xrange(img.size[1] / 2, img.size[1]):
for x in xrange(img.size[0] / 2):
img.putpixel((x, y), 1)
img.save('Images/test/diag2.bmp')
def create_vertical_line_sequence():
for x in xrange(608):
img = Image.new('1', (608, 684), 'black')
draw_white_line_at_x(img, [x])
img.save('Images/test/vertical/line' + str(x) + '.bmp')
def create_vert_seven():
for frame in xrange(86): # for each of 86 frames
img = Image.new('1', (608, 684), 'black')
for line in xrange(7): # for each of 7 lines
# draw vertical stripe at ((line * 86) + frame)
x = (line * 86) + frame
draw_white_line_at_x(img, [x])
img.save('Images/test/vert_seven/line' + str(frame) + '.bmp')
def moire():
img = Image.new('1', (608, 684), 'black')
for y in xrange(img.size[1]):
for x in xrange(0, img.size[0], 2):
img.putpixel((x, y), 1)
img.save('Images/test/moire.bmp')
def create_weird_code():
img = Image.new('1', (608, 684), 'black')
draw_white_line_at_x(img,
[1,2,4,7,9,10,13,14,16,19,20,23,25,26,28,31,33,34,36,39,40,42,45,46,48,51,52,54,57,59,60,62,65,67,69,70,72,75,77,78,81,83,84,86,89,90,93,95,96,98,101,102,104,107,109],
5)
img.save('Images/test/code/frame00.bmp')
img = Image.new('1', (608, 684), 'black')
draw_white_line_at_x(img,
[2,4,5,7,10,12,13,15,18,20,21,23,26,28,29,32,34,35,37,40,41,43,46,47,49,52,53,55,58,59,61,64,66,67,69,72,74,75,77,80,82,83,86,87,89,92,93,95,98,100,101,103,106,108,109], 5)
img.save('Images/test/code/frame01.bmp')
img = Image.new('1', (608, 684), 'black')
draw_white_line_at_x(img,
[1,3,4,7,9,10,13,15,16,18,21,23,24,26,29,30,32,35,37,38,40,43,45,46,48,51,53,54,56,59,60,62,65,66,68,71,73,74,77,78,80,83,85,86,88,91,93,94,96,99,100,102,105,107,108], 5)
img.save('Images/test/code/frame02.bmp')
img = Image.new('1', (608, 684), 'black')
draw_white_line_at_x(img,
[2,3,5,8,10,11,13,16,18,19,22,23,25,28,30,31,34,35,37,40,42,43,45,48,50,51,53,56,58,59,61,64,65,67,70,71,73,76,78,79,81,84,86,87,89,92,94,95,97,100,102,103,105,108,109], 5)
img.save('Images/test/code/frame03.bmp')
def draw_white_line_at_x(image, x_locations, size = 1):
for loc in x_locations:
for x in xrange(loc * size, (loc * size) + size):
for y in xrange(image.size[1]):
image.putpixel((x, y), 1)
def location_of_stripe(stripe_num):
return stripe_num * 5
moire()
create_white_and_black()
create_half_and_half()
create_diagonals()
create_vertical_line_sequence()
create_vert_seven()
create_weird_code()
``` |
{
"source": "2flps/python-autodrawer",
"score": 4
} |
#### File: 2flps/python-autodrawer/ini_parser.py
```python
class Parser:
def __init__(self, nomeArquivo):
'''
-> Constructor
:param nomeArquivo: The name of the file you want to generate
'''
self.nomeArquivo = nomeArquivo
def escrever(self):
'''
-> Generates a .ini configuration file in the root folder. If the file already exists, nothing happens
:return: no return value
'''
try:
arquivo = open(f'{self.nomeArquivo}', 'x')
arquivo.write('''# ---File Configs---
# This is the file where you are going to put your configs in
# Please, be cautious to not fill some parameter with the wrong value
#
# THIS IS YOUR CANVAS
# -> x=int y=int------------------ <- x=int y=int
# | |
# | CANVAS |
# | |
# -> x=int y=int------------------ <- x=int y=int
#
photo = images/photo.png # < - String. The file you want to draw on Paint 3D. Must be inside the 'images' folder by default
monitor_x = 1920 # <- Integer. The X size of your monitor. 1920 by default
monitor_y = 1080 # <- Integer. The Y size of your monitor. 1080 by default
canvas_topleftx = 434 # <- Integer. The X position of the Top Left Corner of your canvas. 434 by default
canvas_toplefty = 315 # <- Integer. The Y position of the Top Left Corner of your canvas. 315 by default
canvas_bottomrightx = 1273 # <- Integer. The X position of the Bottom Right Corner of your canvas. 1273 by default
canvas_bottomrighty = 862 # <- Integer. The Y position of the Bottom Right Corner of your canvas. 862 by default
canvas_zoom = 33 # <- Integer. The zoom you want your canvas to be. 33 by default
canvas_zoompos = (1576, 102) # <- Tuple. A tuple with two values. The first one is the X position of the zoom selector. The second one is the Y position of the zoom selector. (1576, 102) by default
keyboard_interruptionKey = space # <- String. The keyboard key to interrupt the program. 'space' by default
colorSelector_rpos = (1145, 493) # <- Tuple. A tuple with two values. The first one is the X position of the R value in the color selector. The second one is the Y position of the R value in the color selector. (1145, 493) by default
colorSelector_gpos = (1145, 550) # <- Tuple. A tuple with two values. The first one is the X position of the G value in the color selector. The second one is the Y position of the G value in the color selector. (1145, 550) by default
colorSelector_bpos = (1145, 606) # <- Tuple. A tuple with two values. The first one is the X position of the B value in the color selector. The second one is the Y position of the B value in the color selector. (1145, 606) by default
colorSelector_okbutton = (851, 728) # <- Tuple. A tuple with two values. The first one is the X position of the OK button in the color selector. The second one is the Y position of the OK button in the color selector. (851, 728) by default
colorPalette_colorpos = (1695, 997) # <- Tuple. A tuple with two values. The first one is the X position of the color to be changed in the color palette. The second one is the Y position of the color to be changed in the color palette. (1695, 997) by default
draw_tool = pencil # <- String. The tool you want to use. The available tools are: pencil, crayon, pixelpen. 'pencil' by default
draw_thickness = 6 # <- Integer. The thickness of the tool. Must be > 0. 6 by default
draw_opacity = 60 # <- Integer. The opacity of the tool. Must be > 0 and < 101. 60 by default
draw_thicknesspos = (1866, 285) # <- Tuple. A tuple with two values. The first one is the X position of the thickness selector on the screen. The second one is the Y position of the thickness selector on the screen. (1866, 285) by default
draw_opacitypos = (1863, 365) # <- Tuple. A tuple with two values. The first one is the X position of the opacity selector on the screen. The second one is the Y position of the opacity selector on the screen. (1863, 365) by default
delay = 0.01 # Float. The delay of drawing pixels on the canvas. WARNING: Lower values might crash/glitch Paint 3D. If you are experiencing glitches even with the default value, please INCREASE the value. 0.01 by default
''')
except:
print('File already exists')
def contarLinhas(self):
'''
-> This will count how many lines there are in the file
:return: The total number of lines
'''
arquivo = open(f'{self.nomeArquivo}', 'r')
linhas = 0
for linha in arquivo:
if linha != '\n':
linhas += 1
arquivo.close()
return linhas
def linhas(self):
'''
-> This will generate a list where each index is one line of the file
:return: Returns a list where each index is one line
'''
linha = open('{}'.format(self.nomeArquivo), 'r')
linhatotal = linha.readlines()
listanova = list()
for index in linhatotal:
linhanova = index.replace('\n', '').strip()
listanova.append(linhanova[:])
return listanova
def procurarParametro(self, param):
'''
-> This will search every line trying to find the specified parameter.
:param param: The parameter you want to search
:return: Returns the entire string containing the parameter
'''
listaLinhas = self.linhas()
linha = -1
contagem = 0
while True:
if contagem >= len(listaLinhas):
raise IndexError('Parameter not found in the list.')
if listaLinhas[contagem][0] == '' or listaLinhas[contagem][0] == '#' or listaLinhas[contagem][0] == ' ':
contagem += 1
elif param in listaLinhas[contagem]:
linha = contagem
break
else:
contagem+= 1
pass
return listaLinhas[linha]
def posIgual(self, linha):
'''
-> Will find the "=" position on the line
:param linha: The line you want to find the "=" position
:return: Return the index position of "="
'''
posIgual = -1
for c in range(0, len(linha)):
if linha[c] == '=':
posIgual = c
else:
pass
return posIgual
def posComent(self, linha):
'''
-> Will find the "#" position on the line
:param linha: The line you want to find the "#" position
:return: Return the index position of "#"
'''
posComent = -1
for c in range(0, len(linha)):
if linha[c] == '#':
posComent = c
else:
pass
return posComent
def argumento(self, linha, type):
'''
-> Will return the value specified for the parameter
:param linha: The line whose value you want to return
:param type: The type the value should be converted to. The available options are: 'tuple_int', 'int', 'str', 'float'
:return: Returns the converted value for the line
'''
TIPOS = ['tuple_int', 'int', 'str', 'float']
arg = linha[self.posIgual(linha) + 1 : self.posComent(linha)].strip()
if type not in TIPOS:
raise TypeError('Type not available')
if type == 'tuple_int':
try:
argtupla = arg.replace('(', '').replace(')', '').replace(' ', '').split(',')
lista = list(argtupla)
listanova = list()
for item in lista:
integer = int(item)
listanova.append(integer)
return tuple(listanova)
except Exception:
raise TypeError('Not possible to convert to the specified type.')
elif type == 'int':
try:
return int(arg)
except Exception:
raise TypeError('Not possible to convert to the specified type.')
elif type == 'str':
try:
return arg
except Exception:
raise TypeError('Not possible to convert to the specified type.')
elif type == 'float':
try:
return float(arg)
except Exception:
raise TypeError('Not possible to convert to the specified type.')
class Parameters(Parser):
'''
-> Every method in this class works the same way; only the parameter being looked up changes
'''
pass
def photo(self):
return self.argumento(self.procurarParametro('photo'), 'str')
def monitor_x(self):
return self.argumento(self.procurarParametro('monitor_x'), 'int')
def monitor_y(self):
return self.argumento(self.procurarParametro('monitor_y'), 'int')
def canvas_topleftx(self):
return self.argumento(self.procurarParametro('canvas_topleftx'), 'int')
def canvas_toplefty(self):
return self.argumento(self.procurarParametro('canvas_toplefty'), 'int')
def canvas_bottomrightx(self):
return self.argumento(self.procurarParametro('canvas_bottomrightx'), 'int')
def canvas_bottomrighty(self):
return self.argumento(self.procurarParametro('canvas_bottomrighty'), 'int')
def canvas_zoom(self):
return self.argumento(self.procurarParametro('canvas_zoom'), 'int')
def canvas_zoompos(self):
return self.argumento(self.procurarParametro('canvas_zoompos'), 'tuple_int')
def keyboard_interruptionKey(self):
return self.argumento(self.procurarParametro('keyboard_interruptionKey'), 'str')
def colorSelector_rpos(self):
return self.argumento(self.procurarParametro('colorSelector_rpos'), 'tuple_int')
def colorSelector_gpos(self):
return self.argumento(self.procurarParametro('colorSelector_gpos'), 'tuple_int')
def colorSelector_bpos(self):
return self.argumento(self.procurarParametro('colorSelector_bpos'), 'tuple_int')
def colorSelector_okbutton(self):
return self.argumento(self.procurarParametro('colorSelector_okbutton'), 'tuple_int')
def colorPalette_colorpos(self):
return self.argumento(self.procurarParametro('colorPalette_colorpos'), 'tuple_int')
def draw_tool(self):
return self.argumento(self.procurarParametro('draw_tool'), 'str')
def draw_thickness(self):
return self.argumento(self.procurarParametro('draw_thickness'), 'int')
def draw_opacity(self):
return self.argumento(self.procurarParametro('draw_opacity'), 'int')
def draw_thicknesspos(self):
return self.argumento(self.procurarParametro('draw_thicknesspos'), 'tuple_int')
def draw_opacitypos(self):
return self.argumento(self.procurarParametro('draw_opacitypos'), 'tuple_int')
def delay(self):
return self.argumento(self.procurarParametro('delay'), 'float')
if __name__ == '__main__':
pass
else:
Parameters('config.ini').escrever()
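# Usage sketch (assumes the config.ini generated by escrever() above, with its default values):
#
#     params = Parameters('config.ini')
#     params.photo()           # -> 'images/photo.png'
#     params.monitor_x()       # -> 1920
#     params.canvas_zoompos()  # -> (1576, 102)
#     params.delay()           # -> 0.01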
``` |
{
"source": "2flps/Python-Brasileirao",
"score": 4
} |
#### File: 2flps/Python-Brasileirao/menuopcoestabela.py
```python
from tabelabrasileirao import tabela
def times(tabela):
'''
-> Collects the name of every team and puts them into a list
:param tabela: table holding the Brazilian championship (Brasileirão) information
:return: list with the team names
'''
times = list()
for c in range(0, len(tabela)):
times.append(tabela[c]['Time'][:])
return times
def verTimes(listaTimes):
'''
-> Prints the name of every team in a formatted way
:param listaTimes: list containing all the teams (from the times() function)
:return: no return value
'''
print('Lista de times:', end = ' ')
ultimoElemento = len(listaTimes) # Gets the length of the list
print(ultimoElemento)
for c in range(0, ultimoElemento):
if c == ultimoElemento - 1:
print(listaTimes[c], end = '.')
else:
print(listaTimes[c], end = ', ')
print()
def verTimeNome(times, nomeTime, tabela):
'''
-> Shows a team's information by looking it up by name
:param times: list containing the team names
:param nomeTime: team whose data the user wants to see
:param tabela: list containing the dictionaries with the teams' information
:return: team information / error if the team does not exist
'''
timeslow = list()
for time in times:
timelow = time.lower()
timeslow.append(timelow[:])
nomedotime = nomeTime.strip().lower()
contador = 0
posicaodotime = 0
encontrado = False
for c in range(0, len(timeslow)):
if timeslow[c] == nomedotime:
posicaodotime = contador
encontrado = True
else:
contador += 1
if encontrado == False:
return 'O seu time não foi encontrado. Talvez você digitou errado o nome do time, ou esqueceu de algum acento.'
else:
return ('''
{}
{:^158}
{}
| {:<15} | {:^17} | {:^8} | {:^8} | {:^11} | {:^9} | {:^11} | {:^11} | {:^13} | {:^17} | {:^4} |
| {:<15} | {:^17} | {:^8} | {:^8} | {:^11} | {:^9} | {:^11} | {:^11} | {:^13} | {:^17} | {:^4} |'''.format('-' * 158, 'BRASILEIRÃO - ANO 2020', '-' * 158, 'Classificação', 'Time', 'Pontos', 'Jogos', 'Vitórias', 'Empates', 'Derrotas', 'Gols pró', 'Gols contra', 'Saldo de gols', '%', tabela[posicaodotime]['Classificação'], tabela[posicaodotime]['Time'], tabela[posicaodotime]['Pontos'], tabela[posicaodotime]['Jogos'], tabela[posicaodotime]['Vitórias'], tabela[posicaodotime]['Empates'], tabela[posicaodotime]['Derrotas'], tabela[posicaodotime]['Gols pró'], tabela[posicaodotime]['Gols contra'], tabela[posicaodotime]['Saldo de gols'], tabela[posicaodotime]['%']))
def verTimeClassificacao(timePos, tabela):
'''
-> Shows a team's information by looking it up by its position in the table
:param timePos: the team's position in the table (1-20)
:param tabela: list containing the dictionaries with the teams' information
:return: team information / error if the position is out of range
'''
if timePos > 20 or timePos < 1:
return 'O seu time não foi encontrado. Por favor, digite um valor entre 1 e 20.'
elif type(timePos) != int:
return 'O seu time não foi encontrado. Por favor, digite um valor numérico entre 1 e 20.'
else:
posicaodotime = timePos - 1
return ('''
{}
{:^158}
{}
| {:<15} | {:^17} | {:^8} | {:^8} | {:^11} | {:^9} | {:^11} | {:^11} | {:^13} | {:^17} | {:^4} |
| {:<15} | {:^17} | {:^8} | {:^8} | {:^11} | {:^9} | {:^11} | {:^11} | {:^13} | {:^17} | {:^4} |'''.format('-' * 158, 'BRASILEIRÃO - ANO 2020', '-' * 158, 'Classificação', 'Time', 'Pontos', 'Jogos', 'Vitórias', 'Empates', 'Derrotas', 'Gols pró', 'Gols contra', 'Saldo de gols', '%', tabela[posicaodotime]['Classificação'], tabela[posicaodotime]['Time'], tabela[posicaodotime]['Pontos'], tabela[posicaodotime]['Jogos'], tabela[posicaodotime]['Vitórias'], tabela[posicaodotime]['Empates'], tabela[posicaodotime]['Derrotas'], tabela[posicaodotime]['Gols pró'], tabela[posicaodotime]['Gols contra'], tabela[posicaodotime]['Saldo de gols'], tabela[posicaodotime]['%']))
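# Usage sketch (assumes `tabela` imported above follows the documented dict layout; the team name is hypothetical):
#
#     listaTimes = times(tabela)
#     verTimes(listaTimes)
#     print(verTimeNome(listaTimes, 'Flamengo', tabela))
#     print(verTimeClassificacao(1, tabela))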
``` |
{
"source": "2general/django-grains",
"score": 2
} |
#### File: grains/migrations/0002_auto__add_grain.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Grain'
db.create_table(u'grains_grain', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=1024, db_index=True)),
('content_type', self.gf('django.db.models.fields.CharField')(default='text/plain', max_length=255)),
('value', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'grains', ['Grain'])
def backwards(self, orm):
# Deleting model 'Grain'
db.delete_table(u'grains_grain')
models = {
u'grains.grain': {
'Meta': {'object_name': 'Grain'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'text/plain'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['grains']
``` |
{
"source": "2general/django-mailchimp",
"score": 2
} |
#### File: django-mailchimp/mailchimp/chimp.py
```python
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from mailchimp.chimpy.chimpy import Connection as BaseConnection, ChimpyException
from mailchimp.utils import wrap, build_dict, Cache, WarningLogger
from mailchimp.exceptions import (MCCampaignDoesNotExist, MCListDoesNotExist,
MCConnectionFailed, MCTemplateDoesNotExist, MCFolderDoesNotExist)
from mailchimp.constants import *
from mailchimp.settings import WEBHOOK_KEY
import datetime
class SegmentCondition(object):
OPERATORS = {
'eq': lambda a,b: a == b,
'ne': lambda a,b: a != b,
'gt': lambda a,b: a > b,
'lt': lambda a,b: a < b,
'like': lambda a,b: a in b,
'nlike': lambda a,b: a not in b,
'starts': lambda a,b: str(a).startswith(str(b)),
'ends': lambda a,b: str(a).endswith(str(b))
}
def __init__(self, field, op, value):
self.field = field
self.op = op
self.value = value
check_function_name = 'check_%s' % self.field
if not hasattr(self, check_function_name):
check_function_name = 'merge_check'
self.checker = getattr(self, check_function_name)
def check(self, member):
return self.checker(member)
def check_interests(self, member):
interests = self.value.split(',')
if self.op == 'all':
for interest in interests:
if interest not in member.interests:
return False
return True
elif self.op == 'one':
for interest in interests:
if interest in member.interests:
return True
return False
else:
for interest in interests:
if interest in member.interests:
return False
return True
def merge_check(self, member):
return self.OPERATORS[self.op](member.merges[self.field.upper()], self.value)
class BaseChimpObject(object):
_attrs = ()
_methods = ()
verbose_attr = 'id'
cache_key = 'id'
def __init__(self, master, info):
self.master = master
for attr in self._attrs:
setattr(self, attr, info[attr])
base = self.__class__.__name__.lower()
self.cache = master.cache.get_child_cache(getattr(self, self.cache_key))
self.con = master.con
for method in self._methods:
setattr(self, method, wrap(base, self.master.con, method, self.id))
def __repr__(self):
return '<%s object: %s>' % (self.__class__.__name__, getattr(self, self.verbose_attr))
class Campaign(BaseChimpObject):
_attrs = ('archive_url', 'create_time', 'emails_sent', 'folder_id',
'from_email', 'from_name', 'id', 'inline_css', 'list_id',
'send_time', 'status', 'subject', 'title', 'to_name', 'type',
'web_id')
_methods = ('delete', 'pause', 'replicate', 'resume', 'schedule',
'send_now', 'send_test', 'unschedule')
verbose_attr = 'subject'
def __init__(self, master, info):
super(Campaign, self).__init__(master, info)
try:
self.list = self.master.get_list_by_id(self.list_id)
except MCListDoesNotExist:
self.list = None
self._content = None
self.frozen_info = info
def __unicode__(self):
return self.subject
__str__ = __unicode__
@property
def content(self):
return self.get_content()
def get_content(self):
if self._content is None:
self._content = self.con.campaign_content(self.id)
return self._content
def send_now_async(self):
now = datetime.datetime.utcnow()
soon = now + datetime.timedelta(minutes=1)
return self.schedule(soon)
def delete(self):
return self.con.campaign_delete(self.id)
def pause(self):
return self.con.campaign_pause(self.id)
def update(self):
status = []
for key, value in self._get_diff():
status.append(self.con.campaign_update(self.id, key, value))
return all(status)
def _get_diff(self):
diff = []
new_frozen = {}
for key in self._attrs:
current = getattr(self, key)
if self.frozen_info[key] != current:
diff.append((key, current))
new_frozen[key] = current
self.frozen_info = new_frozen
return diff
@property
def is_sent(self):
return self.status == 'sent'
class Member(BaseChimpObject):
_attrs = ('email', 'timestamp')
_extended_attrs = ('id', 'ip_opt', 'ip_signup', 'merges', 'status')
verbose_attr = 'email'
cache_key = 'email'
def __init__(self, master, info):
super(Member, self).__init__(master, info)
def __unicode__(self):
return self.email
__str__ = __unicode__
def __getattr__(self, attr):
if attr in self._extended_attrs:
return self.info[attr]
raise AttributeError(attr)
@property
def interests(self):
return [i.strip() for i in self.merges['INTERESTS'].split(',')]
@property
def info(self):
return self.get_info()
def get_info(self):
return self.cache.get('list_member_info', self.con.list_member_info, self.master.id, self.email)
def update(self):
return self.con.list_update_member(self.master.id, self.email, self.merges)
class LazyMemberDict(dict):
def __init__(self, master):
super(LazyMemberDict, self).__init__()
self._list = master
def __getitem__(self, key):
if key in self:
return super(LazyMemberDict, self).__getitem__(key)
value = self._list.get_member(key)
self[key] = value
return value
class List(BaseChimpObject):
'''
This represents a mailing list. Most of the methods (defined in _methods) are wrappers of the flat
API found in chimpy.chimpy. As such, signatures are the same.
'''
_methods = ('batch_subscribe',
'batch_unsubscribe',
'subscribe', # Sig: (email_address,merge_vars{},email_type='text',double_optin=True)
'unsubscribe')
_attrs = ('id', 'date_created', 'name', 'web_id', 'stats')
verbose_attr = 'name'
def __init__(self, *args, **kwargs):
super(List, self).__init__(*args, **kwargs)
self.members = LazyMemberDict(self)
def segment_test(self, match, conditions):
return self.master.con.campaign_segment_test(self.id, {'match': match, 'conditions': conditions})
def list_interest_groupings(self):
return self.master.con.list_interest_groupings(self.id)
def list_interest_groups(self, grouping_id=None, full=False):
grouping_id = int(grouping_id or self._default_grouping())
groupings = self.list_interest_groupings()
grouping = None
for g in groupings:
if int(g['id']) == grouping_id:
grouping = g
break
if not grouping:
return []
if not full:
return [group['name'] for group in grouping['groups']]
return grouping
def add_interest_group(self, groupname, grouping_id=None):
grouping_id = grouping_id or self._default_grouping()
return self.master.con.list_interest_group_add(self.id, groupname, grouping_id)
def remove_interest_group(self, groupname, grouping_id=None):
grouping_id = grouping_id or self._default_grouping()
return self.master.con.list_interest_group_del(self.id, groupname, grouping_id)
def update_interest_group(self, oldname, newname, grouping_id=None):
grouping_id = grouping_id or self._default_grouping()
return self.master.con.list_interest_group_update(self.id, oldname, newname, grouping_id)
def add_interests_if_not_exist(self, *interests):
self.cache.flush('interest_groups')
interest_groups = self.interest_groups['groups']
names = set(g['name'] for g in interest_groups)
for interest in set(interests):
if interest not in names:
self.add_interest_group(interest)
interest_groups.append(interest)
def _default_grouping(self):
if not hasattr(self, '_default_grouping_id'):
groupings = self.list_interest_groupings()
if len(groupings):
self._default_grouping_id = groupings[0]['id']
else:
self._default_grouping_id = None
return self._default_grouping_id
@property
def webhooks(self):
return self.get_webhooks()
def get_webhooks(self):
return self.cache.get('webhooks', self.master.con.list_webhooks, self.id)
def add_webhook(self, url, actions, sources):
return self.master.con.list_webhook_add(self.id, url, actions, sources)
def remove_webhook(self, url):
return self.master.con.list_webhook_del(self.id, url)
def add_webhook_if_not_exists(self, url, actions, sources):
for webhook in self.webhooks:
if webhook['url'] == url:
return True
return self.add_webhook(url, actions, sources)
def install_webhook(self):
domain = Site.objects.get_current().domain
if not (domain.startswith('http://') or domain.startswith('https://')):
domain = 'http://%s' % domain
if domain.endswith('/'):
domain = domain[:-1]
url = domain + reverse('mailchimp_webhook', kwargs={'key': WEBHOOK_KEY})
actions = {'subscribe': True,
'unsubscribe': True,
'profile': True,
'cleaned': True,
'upemail': True,}
sources = {'user': True,
'admin': True,
'api': False}
return self.add_webhook_if_not_exists(url, actions, sources)
@property
def interest_groups(self):
return self.get_interest_groups()
def get_interest_groups(self):
return self.cache.get('interest_groups', self.list_interest_groups, full=True)
def add_merge(self, key, desc, req=None):
req = req or {}
return self.master.con.list_merge_var_add(self.id, key, desc, req if req else False)
def remove_merge(self, key):
return self.master.con.list_merge_var_del(self.id, key)
def add_merges_if_not_exists(self, *new_merges):
self.cache.flush('merges')
merges = [m['tag'].upper() for m in self.merges]
for merge in set(new_merges):
if merge.upper() not in merges:
self.add_merge(merge, merge, False)
merges.append(merge.upper())
@property
def merges(self):
return self.get_merges()
def get_merges(self):
return self.cache.get('merges', self.master.con.list_merge_vars, self.id)
def __unicode__(self):
return self.name
__str__ = __unicode__
def get_member(self, email):
try:
data = self.master.con.list_member_info(self.id, email)
except ChimpyException:
return None
# actually it would make more sense giving the member everything
memberdata = {}
memberdata['timestamp'] = data['timestamp']
memberdata['email'] = data['email']
return Member(self, memberdata)
def filter_members(self, segment_opts):
"""
segment_opts = {'match': 'all' if self.segment_options_all else 'any',
'conditions': simplejson.loads(self.segment_options_conditions)}
"""
mode = all if segment_opts['match'] == 'all' else any
conditions = [SegmentCondition(**dict((str(k), v) for k,v in c.items())) for c in segment_opts['conditions']]
for email, member in self.members.items():
if mode([condition.check(member) for condition in conditions]):
yield member
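# Usage sketch (hypothetical list id and interest group names; `conn` is a Connection instance
# defined further below in this module):
#
#     mailing_list = conn.get_list_by_id('abc123')
#     segment_opts = {'match': 'any',
#                     'conditions': [{'field': 'interests', 'op': 'one', 'value': 'news,offers'}]}
#     matched = [member.email for member in mailing_list.filter_members(segment_opts)]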
class Template(BaseChimpObject):
_attrs = ('id', 'layout', 'name', 'preview_image', 'sections', 'default_content', 'source', 'preview')
verbose_attr = 'name'
def build(self, **kwargs):
class BuiltTemplate(object):
def __init__(self, template, data):
self.template = template
self.data = data
self.id = self.template.id
def __iter__(self):
return iter(self.data.items())
data = {}
for key, value in kwargs.items():
if key in self.sections:
data['html_%s' % key] = value
return BuiltTemplate(self, data)
class Folder(BaseChimpObject):
_attrs = ('id', 'name', 'type', 'date_created')
def __init__(self, master, info):
info['id'] = info['folder_id']
del info['folder_id']
super(Folder, self).__init__(master, info)
class Connection(object):
REGULAR = REGULAR_CAMPAIGN
PLAINTEXT = PLAINTEXT_CAMPAIGN
ABSPLIT = ABSPLIT_CAMPAIGN
RSS = RSS_CAMPAIGN
TRANS = TRANS_CAMPAIGN
AUTO = AUTO_CAMPAIGN
DOES_NOT_EXIST = {
'templates': MCTemplateDoesNotExist,
'campaigns': MCCampaignDoesNotExist,
'lists': MCListDoesNotExist,
'folders': MCFolderDoesNotExist,
}
def __init__(self, api_key=None, secure=False, check=True):
self._secure = secure
self._check = check
self._api_key = None
self.con = None
self.is_connected = False
if api_key is not None:
self.connect(api_key)
def connect(self, api_key):
self._api_key = api_key
self.cache = Cache(api_key)
self.warnings = WarningLogger()
self.con = self.warnings.proxy(BaseConnection(self._api_key, self._secure))
if self._check:
status = self.ping()
if status != STATUS_OK:
raise MCConnectionFailed(status)
self.is_connected = True
def ping(self):
return self.con.ping()
@property
def campaigns(self):
return self.get_campaigns()
def get_campaigns(self):
return self.cache.get('campaigns', self._get_categories)
@property
def lists(self):
return self.get_lists()
def get_lists(self):
return self.cache.get('lists', self._get_lists)
@property
def templates(self):
return self.get_templates()
def get_templates(self):
return self.cache.get('templates', self._get_templates)
def _get_categories(self):
return build_dict(self, Campaign, self.con.campaigns()['data'])
def _get_lists(self):
return build_dict(self, List, self.con.lists())
def _get_templates(self):
templates = self.con.campaign_templates()
for t in templates:
t.update(self.con.template_info(template_id=t['id']))
return build_dict(self, Template, templates)
@property
def folders(self):
return self.get_folders()
def get_folders(self):
return self.cache.get('folders', self._get_folders)
def _get_folders(self):
return build_dict(self, Folder, self.con.folders(), key='folder_id')
def get_list_by_id(self, id):
return self._get_by_id('lists', id)
def get_campaign_by_id(self, id):
return self._get_by_id('campaigns', id)
def get_template_by_id(self, id):
return self._get_by_id('templates', id)
def get_template_by_name(self, name):
return self._get_by_key('templates', 'name', name)
def get_folder_by_id(self, id):
return self._get_by_id('folders', id)
def get_folder_by_name(self, name):
return self._get_by_key('folders', 'name', name)
def _get_by_id(self, thing, id):
try:
return getattr(self, thing)[id]
except KeyError:
self.cache.flush(thing)
try:
return getattr(self, thing)[id]
except KeyError:
raise self.DOES_NOT_EXIST[thing](id)
def _get_by_key(self, thing, name, key):
for id, obj in getattr(self, thing).items():
if getattr(obj, name) == key:
return obj
raise self.DOES_NOT_EXIST[thing]('%s=%s' % (name, key))
def create_campaign(self, campaign_type, campaign_list, template, subject,
from_email, from_name, to_name, folder_id=None,
tracking=None, title='',
authenticate=False, analytics=None, auto_footer=False,
generate_text=False, auto_tweet=False, segment_opts=None,
type_opts=None):
"""
Creates a new campaign and returns it for the arguments given.
"""
tracking = tracking or {'opens':True, 'html_clicks': True}
type_opts = type_opts or {}
segment_opts = segment_opts or {}
analytics = analytics or {}
options = {}
if title:
options['title'] = title
else:
options['title'] = subject
options['list_id'] = campaign_list.id
options['template_id'] = template.id
options['subject'] = subject
options['from_email'] = from_email
options['from_name'] = from_name
options['to_name'] = to_name
if folder_id:
options['folder_id'] = folder_id
options['tracking'] = tracking
options['authenticate'] = bool(authenticate)
if analytics:
options['analytics'] = analytics
options['auto_footer'] = bool(auto_footer)
options['generate_text'] = bool(generate_text)
options['auto_tweet'] = bool(auto_tweet)
content = dict(template)
kwargs = {}
if segment_opts.get('conditions', None):
kwargs['segment_opts'] = segment_opts
if type_opts:
kwargs['type_opts'] = type_opts
cid = self.con.campaign_create(campaign_type, options, content,
**kwargs)
camp = self.get_campaign_by_id(cid)
camp.template_object = template
return camp
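# Usage sketch (hypothetical API key, list id, template/section names and addresses):
#
#     conn = Connection(api_key='...')
#     template = conn.get_template_by_name('Default')
#     built = template.build(main='<p>Hello</p>')  # 'main' must be one of template.sections
#     camp = conn.create_campaign(Connection.REGULAR, conn.get_list_by_id('abc123'), built,
#                                 'Monthly news', 'news@example.com', 'Newsletter', 'Subscriber')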
def queue(self, campaign_type, contents, list_id, template_id, subject,
from_email, from_name, to_name, folder_id=None, tracking_opens=True,
tracking_html_clicks=True, tracking_text_clicks=False, title=None,
authenticate=False, google_analytics=None, auto_footer=False,
auto_tweet=False, segment_options=False, segment_options_all=True,
segment_options_conditions=None, type_opts=None, obj=None):
from mailchimp.models import Queue
segment_options_conditions = segment_options_conditions or []
type_opts = type_opts or {}
kwargs = locals().copy()
del kwargs['Queue']
del kwargs['self']
return Queue.objects.queue(**kwargs)
``` |
{
"source": "2general/staticgenerator",
"score": 2
} |
#### File: staticgenerator/staticgenerator/middleware.py
```python
import re
from django.conf import settings
import logging
from staticgenerator import StaticGenerator, StaticGeneratorException
import sys
logger = logging.getLogger('staticgenerator.middleware')
class StaticGeneratorMiddleware(object):
"""
This requires settings.STATIC_GENERATOR_URLS tuple to match on URLs
Example::
STATIC_GENERATOR_URLS = (
r'^/$',
r'^/blog',
)
"""
urls = tuple([re.compile(url) for url in settings.STATIC_GENERATOR_URLS])
excluded_urls = tuple([re.compile(url) for url in getattr(settings, 'STATIC_GENERATOR_EXCLUDE_URLS', [])])
gen = StaticGenerator()
def process_request(self, request):
request._static_generator = False
if getattr(request, 'disable_static_generator', False):
logger.debug('StaticGeneratorMiddleware: disabled')
return None
if (getattr(settings, 'STATIC_GENERATOR_ANONYMOUS_ONLY', False)
and hasattr(request, 'user')
and not request.user.is_anonymous()):
logger.debug('StaticGeneratorMiddleware: '
'disabled for logged in user')
return None
path = request.path_info
for url in self.excluded_urls:
if url.match(path):
logger.debug('StaticGeneratorMiddleware: '
'path %s excluded', path)
return None
for url in self.urls:
if url.match(path):
request._static_generator = True
try:
logger.debug('StaticGeneratorMiddleware: '
'Trying to publish stale path %s', path)
self.gen.publish_stale_path(
path,
request.META.get('QUERY_STRING', ''),
is_ajax=request.is_ajax())
except StaticGeneratorException:
logger.warning(
'StaticGeneratorMiddleware: '
'failed to publish stale content',
exc_info=sys.exc_info(),
extra={'request': request})
return None
logger.debug('StaticGeneratorMiddleware: path %s not matched', path)
return None
def process_response(self, request, response):
# pylint: disable=W0212
# Access to a protected member of a client class
if (response.status_code == 200
and getattr(request, '_static_generator', False)):
try:
self.gen.publish_from_path(
request.path_info,
request.META.get('QUERY_STRING', ''),
response.content,
is_ajax=request.is_ajax())
except StaticGeneratorException:
# Never throw a 500 page because of a failure in
# writing pages to the cache. Remember to monitor
# the site to detect performance regression due to
# a full disk or insufficient permissions in the
# cache directory.
logger.warning(
'StaticGeneratorMiddleware: '
'failed to publish fresh content',
exc_info=sys.exc_info(),
extra={'request': request})
return response
``` |
{
"source": "2gis/appium-autoregister",
"score": 2
} |
#### File: appium-autoregister/android/__init__.py
```python
from os import environ, path
from subprocess import Popen, PIPE
import logging
import copy
import sys
ENCODING = sys.getdefaultencoding()
def get_command_output(p):
return p.stdout.read().decode(ENCODING).strip()
class Adb(object):
android_home = environ.get("ANDROID_HOME", None)
if android_home is None:
exit("set $ANDROID_HOME to path of your android sdk root")
adb = path.join(android_home, "platform-tools", "adb")
if not path.isfile(adb):
exit("adb executable not found in %s" % adb)
def __init__(self, device_name):
self.device_name = device_name
@classmethod
def _popen(cls, args):
args = [arg if isinstance(arg, str) else arg.decode(ENCODING) for arg in args]
command = [cls.adb] + args
p = Popen(command, stdout=PIPE, stderr=PIPE)
p.wait()
if p.returncode != 0:
logging.warning("failed to run command %s" % " ".join(command))
return p
@classmethod
def devices(cls):
return cls._popen(["devices"]).stdout.readlines()
def getprop(self, prop=""):
p = self._popen(["-s", self.device_name, "shell", "getprop", prop])
return get_command_output(p)
def pm_list_has_package(self, package):
p = self._popen(["-s", self.device_name, "shell", "pm", "list", "packages", package])
return get_command_output(p)
class Device(object):
def __init__(self, name, platform):
self.name = name
self.platform = platform
self.adb = Adb(self.name)
self.version = self.adb.getprop("ro.build.version.release")
self.model = self.adb.getprop("ro.product.model")
self.browsers = self.get_browsers()
def __str__(self):
return "<%s %s %s>" % (self.name, self.platform, self.version)
def to_json(self):
_json = copy.copy(self.__dict__)
del _json['adb']
return _json
def get_browsers(self):
browsers = list()
if self.adb.pm_list_has_package("com.android.chrome"):
browsers.append("chrome")
if not browsers:
browsers.append("")
return browsers
def android_device_names():
for line in Adb.devices():
try:
device_name, state = line.decode(ENCODING).split()
except ValueError:
device_name, state = None, None
if state == "device":
yield device_name
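# Usage sketch (requires $ANDROID_HOME and at least one connected device in "device" state;
# the platform string is hypothetical):
#
#     for name in android_device_names():
#         device = Device(name, "Android")
#         print(device, device.browsers)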
```
#### File: appium-autoregister/appium/__init__.py
```python
import asyncio
import os
import logging
import copy
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from utils import get_free_port, run_command
LOG_DIR = "logs"
log = logging.getLogger(__name__)
class AppiumNode(object):
process = None
process_reader = None
appium_executable = os.environ.get("APPIUM_EXECUTABLE", None)
if appium_executable is None:
exit('set $APPIUM_EXECUTABLE to path of appium executable')
def __init__(self, appium_port, device, config_file=None, generate_bootstrap_port=True, additional_args=None):
self.appium_port = appium_port
self.device = device
self.config_file = config_file
self.generate_bootstrap_port = generate_bootstrap_port
self.additional_args = additional_args
self.log = logging.getLogger(self.device.name)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
self.logfile = os.sep.join([LOG_DIR, device.name])
if self.generate_bootstrap_port:
self.bootstrap_port = get_free_port()
def to_json(self):
_json = copy.copy(self.__dict__)
del _json['process']
del _json['log']
return _json
@property
def _command(self):
command = [
self.appium_executable,
"--port", str(self.appium_port),
"--udid", self.device.name]
if self.generate_bootstrap_port:
command += ["--bootstrap-port", str(self.bootstrap_port)]
if self.additional_args:
command += self.additional_args
if self.config_file:
command += ["--nodeconfig", self.config_file]
return command
def start(self):
if self.process is not None:
return self.process
log.info("starting appium node for %s" % self.device)
log.info("running command %s" % " ".join(self._command))
self.process = Popen(self._command, stderr=STDOUT, stdout=PIPE)
self.process_reader = Thread(target=self._log_process_stdout)
self.process_reader.daemon = True
self.process_reader.start()
log.info("process started with pid %s" % self.process.pid)
return self.process
async def start_coro(self):
if self.process is not None:
return self.process
log.info("starting appium node for %s" % self.device)
self.process = await run_command(self._command, wait_end=False)
await self.process.stdout.read(1)
asyncio.ensure_future(self._write_stdout())
if self.process.returncode:
log.warning((await self.process.communicate()))
log.info("process started with pid %s" % self.process.pid)
return self.process
async def _write_stdout(self):
with open(self.logfile, "wb") as fd:
while self.process.returncode is None and\
not self.process.stdout.at_eof():
line = await self.process.stdout.readline()
if line:
fd.write(line)
def stop(self):
if hasattr(self.process, "poll"):
self.process.poll()
if self.process and not self.process.returncode:
self.process.kill()
if self.process_reader:
self.process_reader.join()
if self.config_file:
os.remove(self.config_file)
log.info("appium node for %s stopped" % self.device)
async def delete(self):
self.stop()
def _log_process_stdout(self):
while self.process.poll() is None:
line = self.process.stdout.readline()
if line:
self.log.info("%s" % line.decode().strip("\n"))
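# Usage sketch (hypothetical port; `device` is an android.Device; requires $APPIUM_EXECUTABLE):
#
#     node = AppiumNode(4723, device)
#     node.start()   # blocking variant; start_coro() is the asyncio variant
#     ...
#     node.stop()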
``` |
{
"source": "2gis-test-labs/conf-utils",
"score": 3
} |
#### File: conf-utils/confetta/_git_folder_name.py
```python
from pathlib import Path
from typing import Union
__all__ = ("git_folder_name",)
def _git_folder_name(path: Path) -> Union[str, None]:
maybe_git = path / ".git"
if maybe_git.exists() and maybe_git.is_dir():
return path.name
if path == Path(path.root):
return None
return _git_folder_name(path.parent)
def git_folder_name(path: Union[Path, str, None] = None) -> Union[str, None]:
if path is None:
path = Path()
elif isinstance(path, str):
path = Path(path)
if not path.is_absolute():
path = path.absolute()
return _git_folder_name(path)
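# Usage sketch: walks up from the given (or current) directory until a .git folder is found.
#
#     git_folder_name()        # e.g. 'conf-utils' when run from inside this repository
#     git_folder_name('/tmp')  # -> None if /tmp is not inside a git checkout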
```
#### File: conf-utils/tests/test_docker_host.py
```python
import pytest
from confetta import docker_host
def test_docker_host_default():
assert docker_host() == "localhost"
@pytest.mark.parametrize(("env", "res"), [
("tcp://127.0.0.1:2375", "127.0.0.1"),
("tcp://192.168.59.106", "192.168.59.106")
])
def test_docker_host(env, res, monkeypatch):
monkeypatch.setenv("DOCKER_HOST", env)
assert docker_host() == res
``` |
{
"source": "2gis-test-labs/molotov-ext",
"score": 2
} |
#### File: molotov-ext/molotov_ext/__init__.py
```python
from argparse import Namespace
from functools import partial
from typing import Any
import molotov
from .formatters import DefaultFormatter
from .record_table import RecordTable
from .recorder import Recorder
from .reporter import Reporter
from .scenario import Scenario
__all__ = ("Reporter", "register_reporter", "scenario", "recorder")
recorder = Recorder(RecordTable())
scenario = partial(Scenario, recorder.on_starting_scenario)
@molotov.events()
async def event_listener(event: str, **info: Any) -> None:
if event == "sending_request":
recorder.on_sending_request(info["session"], info["request"])
elif event == "response_received":
recorder.on_response_received(info["session"], info["response"], info["request"])
elif event == "scenario_success":
recorder.on_scenario_success(info["scenario"]["name"], info["wid"])
elif event == "scenario_failure":
recorder.on_scenario_failure(info["scenario"]["name"], info["wid"], info['exception'])
elif event == "current_workers":
recorder.on_current_workers(info["workers"])
def register_reporter(args: Namespace) -> Reporter:
if args.processes > 1:
raise NotImplementedError('Running with more than one process is not supported!')
return Reporter(recorder, DefaultFormatter())
``` |