metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "josesanch/assets_angular",
"score": 2
} |
#### File: assets_angular/assets_angular/assets.py
```python
import os

from django.conf import settings
from django.template.defaultfilters import addslashes
from webassets.filter import Filter, register_filter


class AngularFilter(Filter):
    name = 'angular'
    max_debug_level = None
    options = {
        'module': 'ANGULAR_TEMPLATE_MODULE'
    }

    def get_name(self, source_path):
        # Template key: path of the source file relative to the parent of STATIC_ROOT.
        return os.path.relpath(source_path, os.path.dirname(settings.STATIC_ROOT))

    def input(self, _in, out, **kw):
        name = self.get_name(kw['source_path'])
        content = addslashes(_in.read().replace('\n', ''))
        # Use a local name instead of shadowing the built-in `str`.
        entry = u"\n$templateCache.put(\"{name}\",\"{content}\");".format(name=name, content=content)
        out.write(entry)

    def concat(self, out, hunks, **kw):
        # Wrap all cached templates in a single AngularJS module run() block.
        module = self.module
        out.write("angular.module('%s', []).run(['$templateCache', function($templateCache) {" % module)
        for hunk, info in hunks:
            out.write(hunk.data())
        out.write("}]);")


register_filter(AngularFilter)
``` |
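For context, a minimal sketch of how a filter like this might be wired up with django-assets (the bundle name, template glob and output path are illustrative assumptions; it presumes `STATIC_ROOT` and `ANGULAR_TEMPLATE_MODULE` are set in Django settings and that the module above has been imported so `register_filter` has run):

```python
# Hypothetical wiring, not part of the original repository.
from django_assets import Bundle, register

import assets_angular.assets  # noqa: F401 -- importing runs register_filter(AngularFilter)

angular_templates = Bundle(
    'partials/*.html',         # assumed location of the AngularJS partials
    filters='angular',         # the filter name defined above
    output='js/templates.js',  # assumed output path under STATIC_ROOT
)
register('angular_templates', angular_templates)
```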
{
"source": "josesanch/restorm",
"score": 2
} |
#### File: josesanch/restorm/setup.py
```python
from distribute_setup import use_setuptools
use_setuptools()
import os
import sys
import restorm
from setuptools import setup, find_packages
def read_file(name):
    return open(os.path.join(os.path.dirname(__file__), name)).read()

readme = read_file('README.rst')
changes = read_file('CHANGES.rst')

install_requires = [
    'httplib2>=0.7.1',
    'simplejson>=2.2.1'
]
tests_require = [
    'nose',
    'unittest2',
    'mock',
    'oauth2',  # For Twitter example.
]

setup(
    name='restorm',
    version='.'.join(map(str, restorm.__version__)),
    # Packaging.
    packages=find_packages(exclude=('tests', 'examples')),
    install_requires=install_requires,
    tests_require=tests_require,
    include_package_data=True,
    zip_safe=False,
    # Metadata for PyPI.
    description='RestORM allows you to interact with resources as if they were objects.',
    long_description='\n\n'.join([readme, changes]),
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    platforms=['any'],
    url='http://github.com/joeribekker/restorm',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
)
``` |
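One note on the `version` argument above: joining `map(str, restorm.__version__)` assumes the package exposes its version as an iterable of components rather than a plain string, roughly:

```python
# Assumed shape of restorm/__init__.py (illustrative, not copied from the repository).
__version__ = (0, 1, 0)

# '.'.join(map(str, __version__)) then yields the PyPI-style string '0.1.0'.
print('.'.join(map(str, __version__)))
```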
{
"source": "josesandino/Python-con-JS-2021-Polotic-Misiones",
"score": 4
} |
#### File: Clase 4/Ejercicios/rectangulo.py
```python
class Rectangulo:
    def __init__(self, longitud, ancho):
        self.longitud = longitud
        self.ancho = ancho

    def area(self):
        return self.longitud * self.ancho


lado = float(input("Ingrese la longitud del rectangulo: "))
ancho = float(input("Ingrese el ancho del rectangulo: "))
r1 = Rectangulo(lado, ancho)
print("Area: ", r1.area())
``` |
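A small usage sketch of the class above without the interactive `input()` calls (values are arbitrary), e.g. for a quick test:

```python
# Illustrative only: construct the class directly instead of reading from stdin.
r = Rectangulo(3.0, 4.5)
assert r.area() == 13.5
print("Area:", r.area())
```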
{
"source": "josesho/bootstrap-contrast",
"score": 3
} |
#### File: bootstrap-contrast/bootstrap_contrast/misc_tools.py
```python
def merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.
    Taken from https://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression"""
    z = x.copy()
    z.update(y)
    return z
```
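On Python 3.5+ the same shallow, right-biased merge can be written with dict unpacking, and on 3.9+ with the `|` operator; a quick equivalence check using the function above:

```python
x = {'a': 1, 'b': 2}
y = {'b': 3, 'c': 4}

assert merge_two_dicts(x, y) == {**x, **y}  # Python 3.5+
assert merge_two_dicts(x, y) == x | y       # Python 3.9+
# In every variant, values from y win on key collisions ('b' -> 3).
```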
#### File: bootstrap_contrast/old__/paired_contrast_old.py
```python
from __future__ import division
from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, norm
from collections import OrderedDict
from numpy.random import randint
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator, MaxNLocator, LinearLocator, FixedLocator
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams, rcdefaults
import sys
import seaborn.apionly as sns
import pandas as pd
import numpy as np
import warnings
# These have been placed in separate .py files for reduced code clutter.
from .mpl_tools import rotateTicks, normalizeSwarmY, normalizeContrastY, offsetSwarmX, resetSwarmX, getSwarmSpan
from .mpl_tools import align_yaxis, halfviolin, drawback_y, drawback_x
from .bootstrap_tools import ci, bootstrap, bootstrap_contrast, bootstrap_indexes, jackknife_indexes, getstatarray, bca
from .plot_bootstrap_tools import plotbootstrap, plotbootstrap_hubspoke, swarmsummary
def pairedcontrast(data, x, y, idcol, reps = 3000,
statfunction = None, idx = None, figsize = None,
beforeAfterSpacer = 0.01,
violinWidth = 0.005,
floatOffset = 0.05,
showRawData = False,
showAllYAxes = False,
floatContrast = True,
smoothboot = False,
floatViolinOffset = None,
showConnections = True,
summaryBar = False,
contrastYlim = None,
swarmYlim = None,
barWidth = 0.005,
rawMarkerSize = 8,
rawMarkerType = 'o',
summaryMarkerSize = 10,
summaryMarkerType = 'o',
summaryBarColor = 'grey',
meansSummaryLineStyle = 'solid',
contrastZeroLineStyle = 'solid', contrastEffectSizeLineStyle = 'solid',
contrastZeroLineColor = 'black', contrastEffectSizeLineColor = 'black',
pal = None,
legendLoc = 2, legendFontSize = 12, legendMarkerScale = 1,
axis_title_size = None,
yticksize = None,
xticksize = None,
tickAngle=45,
tickAlignment='right',
**kwargs):
# Preliminaries.
data = data.dropna()
# plot params
if axis_title_size is None:
axis_title_size = 15
if yticksize is None:
yticksize = 12
if xticksize is None:
xticksize = 12
axisTitleParams = {'labelsize' : axis_title_size}
xtickParams = {'labelsize' : xticksize}
ytickParams = {'labelsize' : yticksize}
rc('axes', **axisTitleParams)
rc('xtick', **xtickParams)
rc('ytick', **ytickParams)
## If `idx` is not specified, just take the FIRST TWO levels alphabetically.
if idx is None:
idx = (tuple(np.unique(data[x])[0:2]),)  # wrap in an outer tuple, matching the branches below
else:
# check if multi-plot or not
if all(isinstance(element, str) for element in idx):
# if idx is supplied but not a multiplot (ie single list or tuple)
if len(idx) != 2:
print(idx, "does not have length 2.")
sys.exit(0)
else:
idx = (tuple(idx, ),)
elif all(isinstance(element, tuple) for element in idx):
# if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
if any(len(element) != 2 for element in idx):
# If any of the tuples does not contain exactly two elements, report the whole idx and bail out
# (the generator variable `element` is not in scope here under Python 3).
print(idx, "contains a group that does not have length 2.")
sys.exit(0)
if floatViolinOffset is None:
floatViolinOffset = beforeAfterSpacer/2
if contrastYlim is not None:
contrastYlim = np.array([contrastYlim[0],contrastYlim[1]])
if swarmYlim is not None:
swarmYlim = np.array([swarmYlim[0],swarmYlim[1]])
## Here we define the palette on all the levels of the 'x' column.
## Thus, if the same pandas dataframe is re-used across different plots,
## the color identity of each group will be maintained.
## Set palette based on total number of categories in data['x'] or data['hue_column']
if 'hue' in kwargs:
u = kwargs['hue']
else:
u = x
if ('color' not in kwargs and 'hue' not in kwargs):
kwargs['color'] = 'k'
if pal is None:
pal = dict( zip( data[u].unique(), sns.color_palette(n_colors = len(data[u].unique())) )
)
else:
pal = pal
# Initialise figure.
if figsize is None:
if len(idx) > 2:
figsize = (12,(12/np.sqrt(2)))
else:
figsize = (6,6)
fig = plt.figure(figsize = figsize)
# Initialise GridSpec based on `levs_tuple` shape.
gsMain = gridspec.GridSpec( 1, np.shape(idx)[0]) # 1 row; columns based on number of tuples in tuple.
# Set default statfunction
if statfunction is None:
statfunction = np.mean
# Create list to collect all the contrast DataFrames generated.
contrastList = list()
contrastListNames = list()
for gsIdx, xlevs in enumerate(idx):
## Pivot tempdat to get before and after lines.
data_pivot = data.pivot_table(index = idcol, columns = x, values = y)
# Start plotting!!
if floatContrast is True:
ax_raw = fig.add_subplot(gsMain[gsIdx], frame_on = False)
ax_contrast = ax_raw.twinx()
else:
gsSubGridSpec = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec = gsMain[gsIdx])
ax_raw = plt.Subplot(fig, gsSubGridSpec[0, 0], frame_on = False)
ax_contrast = plt.Subplot(fig, gsSubGridSpec[1, 0], sharex = ax_raw, frame_on = False)
## Plot raw data as swarmplot or stripplot.
if showRawData is True:
swarm_raw = sns.swarmplot(data = data,
x = x, y = y,
order = xlevs,
ax = ax_raw,
palette = pal,
size = rawMarkerSize,
marker = rawMarkerType,
**kwargs)
else:
swarm_raw = sns.stripplot(data = data,
x = x, y = y,
order = xlevs,
ax = ax_raw,
palette = pal,
**kwargs)
swarm_raw.set_ylim(swarmYlim)
## Get some details about the raw data.
maxXBefore = max(swarm_raw.collections[0].get_offsets().T[0])
minXAfter = min(swarm_raw.collections[1].get_offsets().T[0])
if showRawData is True:
#beforeAfterSpacer = (getSwarmSpan(swarm_raw, 0) + getSwarmSpan(swarm_raw, 1))/2
beforeAfterSpacer = 1
xposAfter = maxXBefore + beforeAfterSpacer
xAfterShift = minXAfter - xposAfter
## shift the after swarmpoints closer for aesthetic purposes.
offsetSwarmX(swarm_raw.collections[1], -xAfterShift)
## pandas DataFrame of 'before' group
x1 = pd.DataFrame({str(xlevs[0] + '_x') : pd.Series(swarm_raw.collections[0].get_offsets().T[0]),
xlevs[0] : pd.Series(swarm_raw.collections[0].get_offsets().T[1]),
'_R_' : pd.Series(swarm_raw.collections[0].get_facecolors().T[0]),
'_G_' : pd.Series(swarm_raw.collections[0].get_facecolors().T[1]),
'_B_' : pd.Series(swarm_raw.collections[0].get_facecolors().T[2]),
})
## join the RGB columns into a tuple, then assign to a column.
x1['_hue_'] = x1[['_R_', '_G_', '_B_']].apply(tuple, axis=1)
x1 = x1.sort_values(by = xlevs[0])
x1.index = data_pivot.sort_values(by = xlevs[0]).index
## pandas DataFrame of 'after' group
### create convenient signifiers for column names.
befX = str(xlevs[0] + '_x')
aftX = str(xlevs[1] + '_x')
x2 = pd.DataFrame( {aftX : pd.Series(swarm_raw.collections[1].get_offsets().T[0]),
xlevs[1] : pd.Series(swarm_raw.collections[1].get_offsets().T[1])} )
x2 = x2.sort_values(by = xlevs[1])
x2.index = data_pivot.sort_values(by = xlevs[1]).index
## Join x1 and x2, on both their indexes.
plotPoints = x1.merge(x2, left_index = True, right_index = True, how='outer')
## Add the hue column if hue argument was passed.
if 'hue' in kwargs:
h = kwargs['hue']
plotPoints[h] = data.pivot(index = idcol, columns = x, values = h)[xlevs[0]]
swarm_raw.legend(loc = legendLoc,
fontsize = legendFontSize,
markerscale = legendMarkerScale)
## Plot the lines to join the 'before' points to their respective 'after' points.
if showConnections is True:
for i in plotPoints.index:
ax_raw.plot([ plotPoints.ix[i, befX],
plotPoints.ix[i, aftX] ],
[ plotPoints.ix[i, xlevs[0]],
plotPoints.ix[i, xlevs[1]] ],
linestyle = 'solid',
color = plotPoints.ix[i, '_hue_'],
linewidth = 0.75,
alpha = 0.75
)
## Hide the raw swarmplot data if so desired.
if showRawData is False:
swarm_raw.collections[0].set_visible(False)
swarm_raw.collections[1].set_visible(False)
if showRawData is True:
#maxSwarmSpan = max(np.array([getSwarmSpan(swarm_raw, 0), getSwarmSpan(swarm_raw, 1)]))/2
maxSwarmSpan = 0.5
else:
maxSwarmSpan = barWidth
## Plot Summary Bar.
if summaryBar is True:
# Calculate means
means = data.groupby([x], sort = True).mean()[y]
# # Calculate medians
# medians = data.groupby([x], sort = True).median()[y]
## Draw summary bar.
bar_raw = sns.barplot(x = means.index,
y = means.values,
order = xlevs,
ax = ax_raw,
ci = 0,
facecolor = summaryBarColor,
alpha = 0.25)
## Draw zero reference line.
ax_raw.add_artist(Line2D(
(ax_raw.xaxis.get_view_interval()[0],
ax_raw.xaxis.get_view_interval()[1]),
(0,0),
color='black', linewidth=0.75
)
)
## get swarm with largest span, set as max width of each barplot.
for i, bar in enumerate(bar_raw.patches):
x_width = bar.get_x()
width = bar.get_width()
centre = x_width + width/2.
if i == 0:
bar.set_x(centre - maxSwarmSpan/2.)
else:
bar.set_x(centre - xAfterShift - maxSwarmSpan/2.)
bar.set_width(maxSwarmSpan)
# Get y-limits of the treatment swarm points.
beforeRaw = pd.DataFrame( swarm_raw.collections[0].get_offsets() )
afterRaw = pd.DataFrame( swarm_raw.collections[1].get_offsets() )
before_leftx = min(beforeRaw[0])
after_leftx = min(afterRaw[0])
after_rightx = max(afterRaw[0])
after_stat_summary = statfunction(beforeRaw[1])
# Calculate the summary difference and CI.
plotPoints['delta_y'] = plotPoints[xlevs[1]] - plotPoints[xlevs[0]]
plotPoints['delta_x'] = [0] * np.shape(plotPoints)[0]
tempseries = plotPoints['delta_y'].tolist()
test = tempseries.count(tempseries[0]) != len(tempseries)
bootsDelta = bootstrap(plotPoints['delta_y'],
statfunction = statfunction,
smoothboot = smoothboot,
reps = reps)
summDelta = bootsDelta['summary']
lowDelta = bootsDelta['bca_ci_low']
highDelta = bootsDelta['bca_ci_high']
# set new xpos for delta violin.
if floatContrast is True:
if showRawData is False:
xposPlusViolin = deltaSwarmX = after_rightx + floatViolinOffset
else:
xposPlusViolin = deltaSwarmX = after_rightx + maxSwarmSpan
else:
xposPlusViolin = xposAfter
if showRawData is True:
# If showRawData is True and floatContrast is True,
# set violinwidth to the barwidth.
violinWidth = maxSwarmSpan
xmaxPlot = xposPlusViolin + violinWidth
# Plot the summary measure.
ax_contrast.plot(xposPlusViolin, summDelta,
marker = 'o',
markerfacecolor = 'k',
markersize = summaryMarkerSize,
alpha = 0.75
)
# Plot the CI.
ax_contrast.plot([xposPlusViolin, xposPlusViolin],
[lowDelta, highDelta],
color = 'k',
alpha = 0.75,
linestyle = 'solid'
)
# Plot the violin-plot.
v = ax_contrast.violinplot(bootsDelta['stat_array'], [xposPlusViolin],
widths = violinWidth,
showextrema = False,
showmeans = False)
halfviolin(v, half = 'right', color = 'k')
# Remove left axes x-axis title.
ax_raw.set_xlabel("")
# Remove floating axes y-axis title.
ax_contrast.set_ylabel("")
# Set proper x-limits
ax_raw.set_xlim(before_leftx - beforeAfterSpacer/2, xmaxPlot)
ax_raw.get_xaxis().set_view_interval(before_leftx - beforeAfterSpacer/2,
after_rightx + beforeAfterSpacer/2)
ax_contrast.set_xlim(ax_raw.get_xlim())
if floatContrast is True:
# Set the ticks locations for ax_raw.
ax_raw.get_xaxis().set_ticks((0, xposAfter))
# Make sure they have the same y-limits.
ax_contrast.set_ylim(ax_raw.get_ylim())
# Drawing in the x-axis for ax_raw.
## Set the tick labels!
ax_raw.set_xticklabels(xlevs, rotation = tickAngle, horizontalalignment = tickAlignment)
## Get lowest y-value for ax_raw.
y = ax_raw.get_yaxis().get_view_interval()[0]
# Align the left axes and the floating axes.
align_yaxis(ax_raw, statfunction(plotPoints[xlevs[0]]),
ax_contrast, 0)
# Add label to floating axes. But on ax_raw!
ax_raw.text(x = deltaSwarmX,
y = ax_raw.get_yaxis().get_view_interval()[0],
horizontalalignment = 'left',
s = 'Difference',
fontsize = 15)
# Set reference lines
## zero line
ax_contrast.hlines(0, # y-coordinate
ax_contrast.xaxis.get_majorticklocs()[0], # x-coordinates, start and end.
ax_raw.xaxis.get_view_interval()[1],
linestyle = 'solid',
linewidth = 0.75,
color = 'black')
## effect size line
ax_contrast.hlines(summDelta,
ax_contrast.xaxis.get_majorticklocs()[1],
ax_raw.xaxis.get_view_interval()[1],
linestyle = 'solid',
linewidth = 0.75,
color = 'black')
# Align the left axes and the floating axes.
align_yaxis(ax_raw, after_stat_summary, ax_contrast, 0.)
else:
# Set the ticks locations for ax_raw.
ax_raw.get_xaxis().set_ticks((0, xposAfter))
fig.add_subplot(ax_raw)
fig.add_subplot(ax_contrast)
ax_contrast.set_ylim(contrastYlim)
# Calculate p-values.
# 1-sample t-test to see if the mean of the difference is different from 0.
ttestresult = ttest_1samp(plotPoints['delta_y'], popmean = 0)[1]
bootsDelta['ttest_pval'] = ttestresult
contrastList.append(bootsDelta)
contrastListNames.append( str(xlevs[1])+' v.s. '+str(xlevs[0]) )
# Turn contrastList into a pandas DataFrame,
contrastList = pd.DataFrame(contrastList).T
contrastList.columns = contrastListNames
# Now we iterate thru the contrast axes to normalize all the ylims.
for j,i in enumerate(range(1, len(fig.get_axes()), 2)):
axx=fig.get_axes()[i]
## Get max and min of the dataset.
lower = np.min(contrastList.ix['stat_array',j])
upper = np.max(contrastList.ix['stat_array',j])
meandiff = contrastList.ix['summary', j]
## Make sure we have zero in the limits.
if lower > 0:
lower = 0.
if upper < 0:
upper = 0.
## Get tick distance on raw axes.
## This will be the tick distance for the contrast axes.
rawAxesTicks = fig.get_axes()[i-1].yaxis.get_majorticklocs()
rawAxesTickDist = rawAxesTicks[1] - rawAxesTicks[0]
## First re-draw of axis with new tick interval
axx.yaxis.set_major_locator(MultipleLocator(rawAxesTickDist))
newticks1 = fig.get_axes()[i].get_yticks()
if floatContrast is False:
if (showAllYAxes is False and i in range( 2, len(fig.get_axes())) ):
axx.get_yaxis().set_visible(showAllYAxes)
else:
## Obtain major ticks that comfortably encompass lower and upper.
newticks2 = list()
for a,b in enumerate(newticks1):
if (b >= lower and b <= upper):
# if the tick lies within upper and lower, take it.
newticks2.append(b)
# if the meandiff falls outside of the newticks2 set, add a tick in the right direction.
if np.max(newticks2) < meandiff:
ind = np.where(newticks1 == np.max(newticks2))[0][0] # find out the max tick index in newticks1.
newticks2.append( newticks1[ind+1] )
elif meandiff < np.min(newticks2):
ind = np.where(newticks1 == np.min(newticks2))[0][0] # find out the min tick index in newticks1.
newticks2.append( newticks1[ind-1] )
newticks2 = np.array(newticks2)
newticks2.sort()
axx.yaxis.set_major_locator(FixedLocator(locs = newticks2))
## Draw zero reference line.
axx.hlines(y = 0,
xmin = fig.get_axes()[i].get_xaxis().get_view_interval()[0],
xmax = fig.get_axes()[i].get_xaxis().get_view_interval()[1],
linestyle = contrastZeroLineStyle,
linewidth = 0.75,
color = contrastZeroLineColor)
sns.despine(ax = fig.get_axes()[i], trim = True,
bottom = False, right = True,
left = False, top = True)
## Draw back the lines for the relevant y-axes.
drawback_y(axx)
## Draw back the lines for the relevant x-axes.
drawback_x(axx)
elif floatContrast is True:
## Get the original ticks on the floating y-axis.
newticks1 = fig.get_axes()[i].get_yticks()
## Obtain major ticks that comfortably encompass lower and upper.
newticks2 = list()
for a,b in enumerate(newticks1):
if (b >= lower and b <= upper):
# if the tick lies within upper and lower, take it.
newticks2.append(b)
# if the meandiff falls outside of the newticks2 set, add a tick in the right direction.
if np.max(newticks2) < meandiff:
ind = np.where(newticks1 == np.max(newticks2))[0][0] # find out the max tick index in newticks1.
newticks2.append( newticks1[ind+1] )
elif meandiff < np.min(newticks2):
ind = np.where(newticks1 == np.min(newticks2))[0][0] # find out the min tick index in newticks1.
newticks2.append( newticks1[ind-1] )
newticks2 = np.array(newticks2)
newticks2.sort()
## Re-draw the axis.
axx.yaxis.set_major_locator(FixedLocator(locs = newticks2))
## Despine and trim the axes.
sns.despine(ax = axx, trim = True,
bottom = False, right = False,
left = True, top = True)
for i in range(0, len(fig.get_axes()), 2):
# Loop through the raw data swarmplots and despine them appropriately.
if floatContrast is True:
sns.despine(ax = fig.get_axes()[i], trim = True, right = True)
else:
sns.despine(ax = fig.get_axes()[i], trim = True, bottom = True, right = True)
fig.get_axes()[i].get_xaxis().set_visible(False)
# Draw back the lines for the relevant y-axes.
ymin = fig.get_axes()[i].get_yaxis().get_majorticklocs()[0]
ymax = fig.get_axes()[i].get_yaxis().get_majorticklocs()[-1]
x, _ = fig.get_axes()[i].get_xaxis().get_view_interval()
fig.get_axes()[i].add_artist(Line2D((x, x), (ymin, ymax), color='black', linewidth=1.5))
# Zero gaps between plots on the same row, if floatContrast is False
if (floatContrast is False and showAllYAxes is False):
gsMain.update(wspace = 0)
else:
# Tight Layout!
gsMain.tight_layout(fig)
# And we're done.
rcdefaults() # restore matplotlib defaults.
sns.set() # restore seaborn defaults.
return fig, contrastList
```
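A hypothetical call sketch for `pairedcontrast` (column names and data are invented, the import path is assumed, and the module targets legacy seaborn `apionly` and pandas `.ix` APIs):

```python
import numpy as np
import pandas as pd

from bootstrap_contrast.old__.paired_contrast_old import pairedcontrast  # assumed import path

np.random.seed(0)
n = 30
before = np.random.normal(10, 2, n)
df = pd.DataFrame({
    'subject': np.tile(np.arange(n), 2),               # id column pairing each before/after row
    'condition': np.repeat(['before', 'after'], n),
    'value': np.concatenate([before, before + 1.5 + np.random.normal(0, 1, n)]),
})

fig, contrasts = pairedcontrast(df, x='condition', y='value', idcol='subject',
                                idx=('before', 'after'), reps=2000)
```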
#### File: bootstrap_contrast/old__/plot_bootstrap_tools.py
```python
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys
import seaborn.apionly as sns
import pandas as pd
import numpy as np
import warnings
# `swarmsummary` below relies on `bootstrap`; import it from the sibling module,
# as the other files in this package do (assumed relative path).
from .bootstrap_tools import bootstrap
def plotbootstrap(coll, bslist, ax, violinWidth,
violinOffset, marker='o', color='k',
markerAlpha=0.75,
markersize=None,
CiAlpha=0.75,
offset=True,
linewidth=2,
rightspace=0.2,
**kwargs):
'''subfunction to plot the bootstrapped distribution along with BCa intervals.'''
if markersize is None:
mSize=12.
else:
mSize=markersize
autoxmin=ax.get_xlim()[0]
x, _=np.array(coll.get_offsets()).T
xmax=x.max()
if offset:
violinbasex=xmax + violinOffset
else:
violinbasex=1
# array=list(bslist.items())[7][1]
array=bslist['diffarray']
v=ax.violinplot(array, [violinbasex],
widths=violinWidth * 2,
showextrema=False, showmeans=False)
for b in v['bodies']:
m=np.nanmean(b.get_paths()[0].vertices[:, 0])
b.get_paths()[0].vertices[:, 0]=np.clip(b.get_paths()[0].vertices[:, 0], m, np.inf)
b.set_color('k')
# Plot the summary measure.
ax.plot(violinbasex, bslist['summary'],
marker=marker,
markerfacecolor=color,
markersize=mSize,
alpha=markerAlpha
)
# Plot the CI.
ax.plot([violinbasex, violinbasex],
[bslist['bca_ci_low'], bslist['bca_ci_high']],
color=color,
alpha=CiAlpha,
linestyle='solid'
)
ax.set_xlim(autoxmin, (violinbasex + violinWidth + rightspace))
if array.min() < 0 < array.max():  # effect sizes straddle zero
ax.set_ylim(array.min(), array.max())
elif 0 <= array.min():
ax.set_ylim(0, array.max() * 1.1)
elif 0 >= array.max():
ax.set_ylim(array.min() * 1.1, 0)
def plotbootstrap_hubspoke(bslist, ax, violinWidth, violinOffset,
marker='o', color='k',
markerAlpha=0.75,
markersize=None,
CiAlpha=0.75,
linewidth=2,
**kwargs):
'''subfunction to plot the bootstrapped distribution along with BCa intervals for hub-spoke plots.'''
if markersize is None:
mSize=12.
else:
mSize=markersize
ylims=list()
for i in range(0, len(bslist)):
bsi=bslist[i]
# array=list(bsi.items())[7][1] # Pull out the bootstrapped array.
array=bsi['diffarray']
ylims.append(array)
# Then plot as violinplot.
v=ax.violinplot(array, [i+1],
widths=violinWidth * 2,
showextrema=False, showmeans=False)
for b in v['bodies']:
m=np.mean(b.get_paths()[0].vertices[:, 0])
b.get_paths()[0].vertices[:, 0]=np.clip(b.get_paths()[0].vertices[:, 0], m, np.inf)
b.set_color('k')
# Plot the summary measure.
ax.plot(i+1, bsi['summary'],
marker=marker,
markerfacecolor=color,
markersize=mSize,
alpha=markerAlpha
)
# Plot the CI.
ax.plot([i+1, i+1],
[bsi['bca_ci_low'], bsi['bca_ci_high']],
color=color,
alpha=CiAlpha,
linestyle='solid'
)
ylims=np.array(ylims).flatten()
if ylims.min() < 0 and ylims.max() < 0: # All effect sizes are less than 0.
ax.set_ylim(1.1 * ylims.min(), 0)
elif ylims.min() > 0: # All effect sizes are more than 0.
ax.set_ylim(-0.25, 1.1 * ylims.max())
elif ylims.min() < 0 < ylims.max(): # One or more effect sizes straddle 0.
ax.set_ylim(1.1 * ylims.min(), 1.1 * ylims.max())
def swarmsummary(data, x, y, idx=None, statfunction=None,
violinOffset=0.1, violinWidth=0.2,
figsize=(7,7), legend=True,
smoothboot=False,
rawMarkerSize=10,
summaryMarkerSize=12,
rawMarkerType='o',
summaryMarkerType='o',
**kwargs):
df=data # so we don't re-order the rawdata!
# initialise statfunction
if statfunction is None:
statfunction=np.mean
# calculate bootstrap list.
bslist=OrderedDict()
if idx is None:
levs=df[x].unique() # DO NOT USE the numpy.unique() method.
# It will not preserve the order of appearance of the levels.
else:
levs=idx
for i in range (0, len(levs)):
temp_df=df.loc[df[x] == levs[i]]
bslist[levs[i]]=bootstrap(temp_df[y], statfunction=statfunction, smoothboot=smoothboot)
bsplotlist=list(bslist.items())
# Initialise figure
#sns.set_style('ticks')
fig, ax=plt.subplots(figsize=figsize)
sw=sns.swarmplot(data=df, x=x, y=y, order=levs,
size=rawMarkerSize, marker=rawMarkerType, **kwargs)
y_lims=list()
for i in range(0, len(bslist)):
plotbootstrap(sw.collections[i],
bslist=bsplotlist[i][1],
ax=ax,
violinWidth=violinWidth,
violinOffset=violinOffset,
marker=summaryMarkerType,
markersize=summaryMarkerSize,
color='k',
linewidth=2)
# Get the y-offsets, save into a list.
_, y=np.array(sw.collections[i].get_offsets()).T
y_lims.append(y)
# Concatenate the list of y-offsets
y_lims=np.concatenate(y_lims)
ax.set_ylim(0.9 * y_lims.min(), 1.1 * y_lims.max())
if legend is True:
ax.legend(loc='center left', bbox_to_anchor=(1.1, 1))
elif legend is False:
ax.legend().set_visible(False)
sns.despine(ax=ax, trim=True)
return fig, pd.DataFrame.from_dict(bslist)
```
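`swarmsummary` takes long-form data plus the grouping and value columns and returns the figure together with a DataFrame of bootstrap results; an illustrative call (data, column names and the import path are assumptions, and legacy seaborn/pandas versions are presumed):

```python
import numpy as np
import pandas as pd

from bootstrap_contrast.old__.plot_bootstrap_tools import swarmsummary  # assumed import path

np.random.seed(1)
df = pd.DataFrame({
    'group': np.repeat(['control', 'treated'], 40),
    'score': np.concatenate([np.random.normal(0.0, 1.0, 40),
                             np.random.normal(0.6, 1.0, 40)]),
})

fig, boot_results = swarmsummary(df, x='group', y='score',
                                 idx=('control', 'treated'), smoothboot=False)
```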
#### File: bootstrap_contrast/old__/sandbox.py
```python
from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, norm
from collections import OrderedDict
from numpy.random import randint
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator, FixedLocator, AutoLocator, FormatStrFormatter
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams, rcdefaults
import sys
import seaborn.apionly as sns
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# This imports the custom functions used.
# These have been placed in separate .py files for reduced code clutter.
from .mpl_tools import rotateTicks, normalizeSwarmY, normalizeContrastY, offsetSwarmX, resetSwarmX, getSwarmSpan
from .mpl_tools import align_yaxis, halfviolin, drawback_y, drawback_x
from .bootstrap_tools import ci, bootstrap, bootstrap_contrast, bootstrap_indexes, jackknife_indexes, getstatarray, bca
from .plot_bootstrap_tools import plotbootstrap, plotbootstrap_hubspoke, swarmsummary
def contrastplot_test(
data, x, y, idx=None,
alpha=0.75,
axis_title_size=None,
barWidth=5,
contrastShareY=True,
contrastEffectSizeLineStyle='solid',
contrastEffectSizeLineColor='black',
contrastYlim=None,
contrastZeroLineStyle='solid',
contrastZeroLineColor='black',
effectSizeYLabel="Effect Size",
figsize=None,
floatContrast=True,
floatSwarmSpacer=0.2,
heightRatio=(1, 1),
idcol=None,
lineWidth=2,
legend=True,
legendFontSize=14,
legendFontProps={},
paired=False,
pal=None,
rawMarkerSize=8,
rawMarkerType='o',
reps=3000,
showGroupCount=True,
show95CI=False,
showAllYAxes=False,
showRawData=True,
smoothboot=False,
statfunction=None,
summaryBar=False,
summaryBarColor='grey',
summaryBarAlpha=0.25,
summaryColour='black',
summaryLine=True,
summaryLineStyle='solid',
summaryLineWidth=0.25,
summaryMarkerSize=10,
summaryMarkerType='o',
swarmShareY=True,
swarmYlim=None,
tickAngle=45,
tickAlignment='right',
violinOffset=0.375,
violinWidth=0.2,
violinColor='k',
xticksize=None,
yticksize=None,
**kwargs):
'''Takes a pandas dataframe and produces a contrast plot:
either a Cummings hub-and-spoke plot or a Gardner-Altman contrast plot.
-----------------------------------------------------------------------
Description of flags upcoming.'''
# Check that `data` is a pandas dataframe
if 'DataFrame' not in str(type(data)):
raise TypeError("The object passed to the command is not not a pandas DataFrame.\
Please convert it to a pandas DataFrame.")
# Get and set levels of data[x]
if idx is None:
widthratio=[1]
allgrps=np.sort(data[x].unique())
if paired:
# If `idx` is not specified, just take the FIRST TWO levels alphabetically.
tuple_in=tuple(allgrps[0:2],)
else:
# No idx is given, so all groups are compared to the first one in the DataFrame column.
tuple_in=(tuple(allgrps), )
if len(allgrps)>2:
floatContrast=False
else:
if all(isinstance(element, str) for element in idx):
# if idx is supplied but not a multiplot (ie single list or tuple)
tuple_in=(idx, )
widthratio=[1]
if len(idx)>2:
floatContrast=False
elif all(isinstance(element, tuple) for element in idx):
# if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
tuple_in=idx
if ( any(len(element)>2 for element in tuple_in) ):
# if any of the tuples in idx has more than 2 groups, we turn set floatContrast as False.
floatContrast=False
# Make sure the widthratio of the seperate multiplot corresponds to how
# many groups there are in each one.
widthratio=[]
for i in tuple_in:
widthratio.append(len(i))
else:
raise TypeError("The object passed to `idx` consists of a mixture of single strings and tuples. \
Please make sure that `idx` is either a tuple of column names, or a tuple of tuples for plotting.")
# initialise statfunction
if statfunction is None:
statfunction=np.mean
# Create list to collect all the contrast DataFrames generated.
contrastList=list()
contrastListNames=list()
# # Calculate the bootstraps according to idx.
# for ix, current_tuple in enumerate(tuple_in):
# bscontrast=list()
# for i in range (1, len(current_tuple)):
# # Note that you start from one. No need to do auto-contrast!
# tempbs=bootstrap_contrast(
# data=data,
# x=x,
# y=y,
# idx=[current_tuple[0], current_tuple[i]],
# statfunction=statfunction,
# smoothboot=smoothboot,
# reps=reps)
# bscontrast.append(tempbs)
# contrastList.append(tempbs)
# contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
# Setting color palette for plotting.
if pal is None:
if 'hue' in kwargs:
colorCol=kwargs['hue']
colGrps=data[colorCol].unique()
nColors=len(colGrps)
else:
colorCol=x
colGrps=data[x].unique()
nColors=len([element for tupl in tuple_in for element in tupl])
plotPal=dict( zip( colGrps, sns.color_palette(n_colors=nColors) ) )
else:
plotPal=pal
# Ensure summaryLine and summaryBar are not displayed together.
if summaryLine is True and summaryBar is True:
summaryBar=True
summaryLine=False
# Turn off summary line if floatContrast is true
if floatContrast:
summaryLine=False
if swarmYlim is None:
# get range of _selected groups_.
u = list()
for t in tuple_in:  # idx can still be None here; tuple_in always holds the selected groups
for i in np.unique(t):
u.append(i)
u = np.unique(u)
tempdat=data[data[x].isin(u)]
swarm_ylim=np.array([np.min(tempdat[y]), np.max(tempdat[y])])
else:
swarm_ylim=np.array([swarmYlim[0],swarmYlim[1]])
if contrastYlim is not None:
contrastYlim=np.array([contrastYlim[0],contrastYlim[1]])
barWidth=barWidth/1000 # Not sure why have to reduce the barwidth by this much!
if showRawData is True:
maxSwarmSpan=0.25
else:
maxSwarmSpan=barWidth
# Expand the ylim in both directions.
## Find half of the range of swarm_ylim.
swarmrange=swarm_ylim[1] -swarm_ylim[0]
pad=0.1*swarmrange
x2=np.array([swarm_ylim[0]-pad, swarm_ylim[1]+pad])
swarm_ylim=x2
# plot params
if axis_title_size is None:
axis_title_size=25
if yticksize is None:
yticksize=18
if xticksize is None:
xticksize=18
# Set clean style
sns.set(style='ticks')
axisTitleParams={'labelsize' : axis_title_size}
xtickParams={'labelsize' : xticksize}
ytickParams={'labelsize' : yticksize}
svgParams={'fonttype' : 'none'}
rc('axes', **axisTitleParams)
rc('xtick', **xtickParams)
rc('ytick', **ytickParams)
rc('svg', **svgParams)
if figsize is None:
if len(tuple_in)>2:
figsize=(12,(12/np.sqrt(2)))
else:
figsize=(8,(8/np.sqrt(2)))
# Initialise figure, taking into account desired figsize.
fig=plt.figure(figsize=figsize)
# Initialise GridSpec based on `tuple_in` shape.
gsMain=gridspec.GridSpec(
1, np.shape(tuple_in)[0],
# 1 row; columns based on number of tuples in tuple.
width_ratios=widthratio,
wspace=0 )
for gsIdx, current_tuple in enumerate(tuple_in):
#### FOR EACH TUPLE IN IDX
plotdat=data[data[x].isin(current_tuple)]
plotdat[x]=plotdat[x].astype("category")
plotdat[x].cat.set_categories(
current_tuple,
ordered=True,
inplace=True)
plotdat=plotdat.sort_values(by=[x])  # sort_values returns a copy, so keep the result
# Drop all nans.
plotdat=plotdat.dropna()
# Calculate summaries.
summaries=plotdat.groupby([x],sort=True)[y].apply(statfunction)
if floatContrast is True:
# Use fig.add_subplot instead of plt.Subplot
ax_raw=fig.add_subplot(gsMain[gsIdx],
frame_on=False)
ax_contrast=ax_raw.twinx()
else:
# Create subGridSpec with 2 rows and 1 column.
subGridSpec=gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gsMain[gsIdx],
wspace=0)
# Use plt.Subplot instead of fig.add_subplot
ax_raw=plt.Subplot(fig,
subGridSpec[0, 0],
frame_on=False)
ax_contrast=plt.Subplot(fig,
subGridSpec[1, 0],
sharex=ax_raw,
frame_on=False)
# Calculate the boostrapped contrast
bscontrast=list()
for i in range (1, len(current_tuple)):
# Note that you start from one. No need to do auto-contrast!
tempbs=bootstrap_contrast(
data=data,
x=x,
y=y,
idx=[current_tuple[0], current_tuple[i]],
statfunction=statfunction,
smoothboot=smoothboot,
reps=reps)
bscontrast.append(tempbs)
contrastList.append(tempbs)
contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
#### PLOT RAW DATA.
if showRawData is True:
# Seaborn swarmplot doc says to set custom ylims first.
ax_raw.set_ylim(swarm_ylim)
sw=sns.swarmplot(
data=plotdat,
x=x, y=y,
order=current_tuple,
ax=ax_raw,
alpha=alpha,
palette=plotPal,
size=rawMarkerSize,
marker=rawMarkerType,
**kwargs)
if summaryBar is True:
bar_raw=sns.barplot(
x=summaries.index.tolist(),
y=summaries.values,
facecolor=summaryBarColor,
ax=ax_raw,
alpha=summaryBarAlpha)
if floatContrast:
# Get horizontal offset values.
maxXBefore=max(sw.collections[0].get_offsets().T[0])
minXAfter=min(sw.collections[1].get_offsets().T[0])
xposAfter=maxXBefore+floatSwarmSpacer
xAfterShift=minXAfter-xposAfter
# shift the swarmplots
offsetSwarmX(sw.collections[1], -xAfterShift)
## get swarm with largest span, set as max width of each barplot.
for i, bar in enumerate(bar_raw.patches):
x_width=bar.get_x()
width=bar.get_width()
centre=x_width + (width/2.)
if i == 0:
bar.set_x(centre-maxSwarmSpan/2.)
else:
bar.set_x(centre-xAfterShift-maxSwarmSpan/2.)
bar.set_width(maxSwarmSpan)
## Set the ticks locations for ax_raw.
ax_raw.xaxis.set_ticks((0, xposAfter))
firstTick=ax_raw.xaxis.get_ticklabels()[0].get_text()
secondTick=ax_raw.xaxis.get_ticklabels()[1].get_text()
ax_raw.set_xticklabels([firstTick,#+' n='+count[firstTick],
secondTick],#+' n='+count[secondTick]],
rotation=tickAngle,
horizontalalignment=tickAlignment)
if summaryLine is True:
for i, m in enumerate(summaries):
ax_raw.plot(
(i -summaryLineWidth,
i + summaryLineWidth), # x-coordinates
(m, m),
color=summaryColour,
linestyle=summaryLineStyle)
if show95CI is True:
sns.barplot(
data=plotdat,
x=x, y=y,
ax=ax_raw,
alpha=0, ci=95)
ax_raw.set_xlabel("")
if floatContrast is False:
fig.add_subplot(ax_raw)
#### PLOT CONTRAST DATA.
if len(current_tuple)==2:
# Plot the CIs on the contrast axes.
plotbootstrap(sw.collections[1],
bslist=tempbs,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
offset=floatContrast,
color=violinColor,
linewidth=1)
if floatContrast:
# Set reference lines
## First get leftmost limit of left reference group
xtemp, _=np.array(sw.collections[0].get_offsets()).T
leftxlim=xtemp.min()
## Then get leftmost limit of right test group
xtemp, _=np.array(sw.collections[1].get_offsets()).T
rightxlim=xtemp.min()
## zero line
ax_contrast.hlines(0, # y-coordinates
leftxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
## effect size line
ax_contrast.hlines(tempbs['summary'],
rightxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastEffectSizeLineStyle,
linewidth=0.75,
color=contrastEffectSizeLineColor)
## If the effect size is positive, shift the right axis up.
if float(tempbs['summary'])>0:
rightmin=ax_raw.get_ylim()[0] -float(tempbs['summary'])
rightmax=ax_raw.get_ylim()[1] -float(tempbs['summary'])
## If the effect size is negative, shift the right axis down.
elif float(tempbs['summary'])<0:
rightmin=ax_raw.get_ylim()[0] + float(tempbs['summary'])
rightmax=ax_raw.get_ylim()[1] + float(tempbs['summary'])
ax_contrast.set_ylim(rightmin, rightmax)
if gsIdx>0:
ax_contrast.set_ylabel('')
align_yaxis(ax_raw, tempbs['statistic_ref'], ax_contrast, 0.)
else:
# Set bottom axes ybounds
if contrastYlim is not None:
ax_contrast.set_ylim(contrastYlim)
# Set xlims so everything is properly visible!
swarm_xbounds=ax_raw.get_xbound()
ax_contrast.set_xbound(swarm_xbounds[0] -(summaryLineWidth * 1.1),
swarm_xbounds[1] + (summaryLineWidth * 1.1))
else:
# Plot the CIs on the bottom axes.
plotbootstrap_hubspoke(
bslist=bscontrast,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
linewidth=lineWidth)
if floatContrast is False:
fig.add_subplot(ax_contrast)
if gsIdx>0:
ax_raw.set_ylabel('')
ax_contrast.set_ylabel('')
# Turn contrastList into a pandas DataFrame,
contrastList=pd.DataFrame(contrastList).T
contrastList.columns=contrastListNames
########
axesCount=len(fig.get_axes())
## Loop thru SWARM axes for aesthetic touchups.
for i in range(0, axesCount, 2):
axx=fig.axes[i]
if i!=axesCount-2 and 'hue' in kwargs:
# If this is not the final swarmplot, remove the hue legend.
axx.legend().set_visible(False)
if floatContrast is False:
axx.xaxis.set_visible(False)
sns.despine(ax=axx, trim=True, bottom=False, left=False)
else:
sns.despine(ax=axx, trim=True, bottom=True, left=True)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(showAllYAxes)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
# Add zero reference line for swarmplots with bars.
if summaryBar is True:
axx.add_artist(Line2D(
(axx.xaxis.get_view_interval()[0],
axx.xaxis.get_view_interval()[1]),
(0,0),
color='black', linewidth=0.75
)
)
# I don't know why the swarm axes controls the contrast axes ticks....
if showGroupCount:
count=data.groupby(x).count()[y]
newticks=list()
for ix, t in enumerate(axx.xaxis.get_ticklabels()):
t_text=t.get_text()
nt=t_text+' n='+str(count[t_text])
newticks.append(nt)
axx.xaxis.set_ticklabels(newticks)
if legend is False:
axx.legend().set_visible(False)
else:
if i==axesCount-2: # the last (rightmost) swarm axes.
axx.legend(loc='upper right',  # matplotlib has no 'top right' legend location
bbox_to_anchor=(1.1,1.0),
fontsize=legendFontSize,
**legendFontProps)
## Loop thru the CONTRAST axes and perform aesthetic touch-ups.
## Get the y-limits:
for j,i in enumerate(range(1, axesCount, 2)):
axx=fig.get_axes()[i]
if floatContrast is False:
xleft, xright=axx.xaxis.get_view_interval()
# Draw zero reference line.
axx.hlines(y=0,
xmin=xleft-1,
xmax=xright+1,
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
# reset view interval.
axx.set_xlim(xleft, xright)
# # Draw back x-axis lines connecting ticks.
# drawback_x(axx)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(False)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
sns.despine(ax=axx,
top=True, right=True,
left=False, bottom=False,
trim=True)
# Rotate tick labels.
rotateTicks(axx,tickAngle,tickAlignment)
else:
# Re-draw the floating axis to the correct limits.
lower=np.min(contrastList.ix['diffarray',j])
upper=np.max(contrastList.ix['diffarray',j])
meandiff=contrastList.ix['summary', j]
## Make sure we have zero in the limits.
if lower>0:
lower=0.
if upper<0:
upper=0.
## Get the tick interval from the left y-axis.
leftticks=fig.get_axes()[i-1].get_yticks()
tickstep=leftticks[1] -leftticks[0]
## First re-draw of axis with new tick interval
axx.yaxis.set_major_locator(MultipleLocator(base=tickstep))
newticks1=axx.get_yticks()
## Obtain major ticks that comfortably encompass lower and upper.
newticks2=list()
for a,b in enumerate(newticks1):
if (b >= lower and b <= upper):
# if the tick lies within upper and lower, take it.
newticks2.append(b)
# if the meandiff falls outside of the newticks2 set, add a tick in the right direction.
if np.max(newticks2)<meandiff:
ind=np.where(newticks1 == np.max(newticks2))[0][0] # find out the max tick index in newticks1.
newticks2.append( newticks1[ind+1] )
elif meandiff<np.min(newticks2):
ind=np.where(newticks1 == np.min(newticks2))[0][0] # find out the min tick index in newticks1.
newticks2.append( newticks1[ind-1] )
newticks2=np.array(newticks2)
newticks2.sort()
## Second re-draw of axis to shrink it to desired limits.
axx.yaxis.set_major_locator(FixedLocator(locs=newticks2))
## Despine the axes.
sns.despine(ax=axx, trim=True,
bottom=False, right=False,
left=True, top=True)
# Normalize bottom/right Contrast axes to each other for Cummings hub-and-spoke plots.
if (axesCount>2 and
contrastShareY is True and
floatContrast is False):
# Set contrast ylim as max ticks of leftmost swarm axes.
if contrastYlim is None:
lower=list()
upper=list()
for c in range(0,len(contrastList.columns)):
lower.append( np.min(contrastList.ix['bca_ci_low',c]) )
upper.append( np.max(contrastList.ix['bca_ci_high',c]) )
lower=np.min(lower)
upper=np.max(upper)
else:
lower=contrastYlim[0]
upper=contrastYlim[1]
normalizeContrastY(fig,
contrast_ylim = contrastYlim,
show_all_yaxes = showAllYAxes)
# if (axesCount==2 and
# floatContrast is False):
# drawback_x(fig.get_axes()[1])
# drawback_y(fig.get_axes()[1])
# if swarmShareY is False:
# for i in range(0, axesCount, 2):
# drawback_y(fig.get_axes()[i])
# if contrastShareY is False:
# for i in range(1, axesCount, 2):
# if floatContrast is True:
# sns.despine(ax=fig.get_axes()[i],
# top=True, right=False, left=True, bottom=True,
# trim=True)
# else:
# sns.despine(ax=fig.get_axes()[i], trim=True)
# Zero gaps between plots on the same row, if floatContrast is False
if (floatContrast is False and showAllYAxes is False):
gsMain.update(wspace=0.)
else:
# Tight Layout!
gsMain.tight_layout(fig)
# And we're all done.
rcdefaults() # restore matplotlib defaults.
sns.set() # restore seaborn defaults.
return fig, contrastList
``` |
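`contrastplot_test` is the sandbox version of the contrast plot; passing a single tuple with more than two groups forces the Cummings hub-and-spoke layout. A hypothetical invocation (data, column names and the import path are invented; legacy seaborn/pandas assumed):

```python
import numpy as np
import pandas as pd

from bootstrap_contrast.old__.sandbox import contrastplot_test  # assumed import path

np.random.seed(2)
df = pd.DataFrame({
    'group': np.repeat(['ctrl', 'drug_A', 'drug_B'], 50),
    'value': np.concatenate([np.random.normal(10, 2, 50),
                             np.random.normal(11, 2, 50),
                             np.random.normal(12, 2, 50)]),
})

# Three groups in one tuple -> floatContrast is turned off and a hub-and-spoke plot is drawn.
fig, contrasts = contrastplot_test(df, x='group', y='value',
                                   idx=(('ctrl', 'drug_A', 'drug_B'),),
                                   reps=2000, summaryBar=True)
```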
{
"source": "JoseSierraVzl/RODE",
"score": 2
} |
#### File: RODE/RODE/RODE.py
```python
import sys
import time
from os import *
import sqlite3
import requests
from requests import exceptions
import json
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets, QtGui, QtCore  # QtCore is referenced by name in initUi below
from source_rc import *
from style import *
class main(QDialog):
def __init__(self, parent = None):
super(main, self).__init__()
#self.setObjectName("Dialog")
self.setWindowTitle("Registro de deudores")
self.setWindowFlags(Qt.WindowTitleHint | Qt.WindowCloseButtonHint | Qt.WindowMinimizeButtonHint)
self.setWindowIcon(QIcon(":/Icono/img/calendar.png"))
self.setFixedSize(654, 490)
self.setStyleSheet("QDialog{\n"
"background-color: #fff}")
self.initUi()
self.mostrar_datos()
# self._timer = QTimer()
# self._timer.singleShot(5000, self.valor_dolar)
#self.valor_dolar
def initUi(self):
self.shadow = QGraphicsDropShadowEffect()
self.shadow.setBlurRadius(15)
#Frame title
self.frame_title = QFrame(self)
self.frame_title.setGeometry(QRect(0,0,653,71))
self.frame_title.setStyleSheet(style_frame_title)
self.label_title = QLabel(self.frame_title)
self.label_title.setGeometry(QRect(0,0,491,71))
self.label_title.setAlignment(Qt.AlignCenter)
self.label_title.setText("REGISTRO DE DEUDORES")
self.label_title.setFont(QtGui.QFont("Roboto", 20))
# self.label_title.setStyleSheet(style_label_title)
##########
#Label de precios
self.label_copias = QLabel(self.frame_title)
self.label_copias.setGeometry(QRect(460,5,161,21))
self.label_copias.setAlignment(Qt.AlignCenter)
self.label_copias.setText("")
self.label_impresiones = QLabel(self.frame_title)
self.label_impresiones.setGeometry(QRect(458,25,191,21))
self.label_impresiones.setAlignment(Qt.AlignCenter)
self.label_impresiones.setText("")
self.label_internet = QLabel(self.frame_title)
self.label_internet.setGeometry(QRect(460,45,191,21))
self.label_internet.setAlignment(Qt.AlignCenter)
self.label_internet.setText("")
#################
#Frame menu
self.frame_menu = QFrame(self)
self.frame_menu.setGeometry(QRect(0,70,91,421))
self.frame_menu.setStyleSheet(style_frame_menu)
self.button_actualizar = QPushButton(self.frame_menu)
self.button_actualizar.setGeometry(QRect(20,11,24,24))
self.button_actualizar.setStyleSheet(style_actualizar)
self.button_actualizar.setToolTip("Click para actualizar tabla")
self.button_actualizar.setIcon(QIcon(":/Recargar/img/Recargar.png"))
self.button_actualizar.setIconSize(QSize(26,26))
self.button_actualizar_dolar = QPushButton(self.frame_menu)
self.button_actualizar_dolar.setGeometry(QRect(50,10,24,24))
self.button_actualizar_dolar.setStyleSheet(style_actualizar)
self.button_actualizar_dolar.setToolTip("Actualizar tasa de intercambio del dolar")
self.button_actualizar_dolar.setIcon(QIcon(":/Dolar_recarga/img/Recargar_dolar.png"))
self.button_actualizar_dolar.setIconSize(QSize(20,20))
self.button_agregar = QPushButton(self.frame_menu)
self.button_agregar.setGeometry(QRect(5,160,81,25))
self.button_agregar.setStyleSheet(style_eliminar_agregar)
self.button_agregar.setIcon(QIcon(":/Agregar/img/Lapiz_negro.png"))
self.button_agregar.setText("Agregar")
self.button_agregar.setIconSize(QSize(16,16))
self.button_eliminar = QPushButton(self.frame_menu)
self.button_eliminar.setGeometry(QRect(5,200,81,25))
self.button_eliminar.setStyleSheet(style_eliminar_agregar)
self.button_eliminar.setIcon(QIcon(":/Eliminar/img/Papelera_negro.png"))
self.button_eliminar.setText("Eliminar")
self.button_eliminar.setIconSize(QSize(19,19))
##########
#Frame menu_dos
self.frame_menu_dos = QFrame(self)
self.frame_menu_dos.setGeometry(QRect(91,70,564,41))
self.frame_menu_dos.setStyleSheet(style_menu_dos)
self.line_edit_buscar = QLineEdit(self.frame_menu_dos)
self.line_edit_buscar.setGeometry(QRect(20,7,151,25))
self.line_edit_buscar.setObjectName("Enter")
self.line_edit_buscar.setPlaceholderText("Ingresa nombre")
self.line_edit_buscar.setStyleSheet(style_line_edit)
self.button_buscar = QPushButton(self.frame_menu_dos)
self.button_buscar.setGeometry(QRect(150,7,21,25))
self.button_buscar.setStyleSheet(style_eliminar_agregar)
self.button_buscar.setObjectName("Buscar")
self.button_buscar.setIcon(QIcon(":/Buscar/img/Lupa_negra.png"))
self.button_buscar.setText("")
self.button_buscar.setIconSize(QSize(16,16))
self.button_buscar_por = QPushButton(self.frame_menu_dos)
self.button_buscar_por.setGeometry(QRect(200,7,81,25))
self.button_buscar_por.setStyleSheet(style_eliminar_agregar)
self.button_buscar_por.setText("Calculadora")
self.label_ultimo_registro = QLabel(self.frame_menu_dos)
self.label_ultimo_registro.setGeometry(QRect(310,10,231,20))
self.label_ultimo_registro.setText("")
self.label_ultimo_registro.setAlignment(Qt.AlignCenter)
self.label_ultimo_registro.setStyleSheet(style_ultimo_registro)
###############
#Tabla de registro
nombreColumnas = ("ID", "Nombre", "Descripción de deuda",
"Monto de deuda", "Fecha", "Hora")
self.Tabla_registro = QTableWidget(self)
self.Tabla_registro.setToolTip("Click para ver usuario")
self.Tabla_registro.setGeometry(QRect(97,120,552,361))
self.Tabla_registro.setStyleSheet(style_qtable_contenido)
self.Tabla_registro.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.Tabla_registro.setDragDropOverwriteMode(False)
self.Tabla_registro.setSelectionBehavior(QAbstractItemView.SelectRows)
self.Tabla_registro.setSelectionMode(QAbstractItemView.SingleSelection)
self.Tabla_registro.setTextElideMode(Qt.ElideRight)
self.Tabla_registro.setWordWrap(False)
self.Tabla_registro.setSortingEnabled(False)
self.Tabla_registro.setColumnCount(6)
self.Tabla_registro.setRowCount(0)
self.Tabla_registro.horizontalHeader().setDefaultAlignment(Qt.AlignHCenter|Qt.AlignVCenter|
Qt.AlignCenter)
self.Tabla_registro.horizontalHeader().setHighlightSections(False)
self.Tabla_registro.horizontalHeader().setStretchLastSection(True)
self.Tabla_registro.verticalHeader().setVisible(False)
self.Tabla_registro.setAlternatingRowColors(False)
self.Tabla_registro.verticalHeader().setDefaultSectionSize(20)
self.Tabla_registro.setHorizontalHeaderLabels(nombreColumnas)
self.Tabla_registro.itemDoubleClicked.connect(self.Item_click)
for indice, ancho in enumerate((50, 100, 300, 100,100,80), start=0):
self.Tabla_registro.setColumnWidth(indice, ancho)
##################
#Registrar un nuevo deudor
self.frame_registro_nuevo = QFrame(self)
self.frame_registro_nuevo.setGeometry(QRect(-1000,160,151,312))
self.frame_registro_nuevo.setStyleSheet(style_menu_dos)
self.frame_registro_nuevo.setGraphicsEffect(self.shadow)
self.label_nombre_apellido = QLabel(self.frame_registro_nuevo)
self.label_nombre_apellido.setGeometry(QRect(10,10,131,20))
self.label_nombre_apellido.setText("Nombre y Apellido")
self.label_nombre_apellido.setAlignment(Qt.AlignCenter)
self.label_nombre_apellido.setStyleSheet(style_ultimo_registro)
self.line_edit_nombre_apellido = QLineEdit(self.frame_registro_nuevo)
self.line_edit_nombre_apellido.setGeometry(QRect(10,35,131,20))
self.line_edit_nombre_apellido.setStyleSheet(style_line_edit)
self.line_edit_nombre_apellido.setPlaceholderText("Ingresa aquí")
self.line_edit_nombre_apellido.setToolTip("Ingresa el nombre y apellido de\nla persona deudora")
self.label_descripcion_deuda = QLabel(self.frame_registro_nuevo)
self.label_descripcion_deuda.setGeometry(QRect(10,70,131,20))
self.label_descripcion_deuda.setText("Descripción de deuda")
self.label_descripcion_deuda.setAlignment(Qt.AlignCenter)
self.label_descripcion_deuda.setStyleSheet(style_ultimo_registro)
self.text_edit_descripcion = QTextEdit(self.frame_registro_nuevo)
self.text_edit_descripcion.setGeometry(QRect(10,95,131,111))
self.text_edit_descripcion.setPlaceholderText("Ingrese la descrición aquí")
self.text_edit_descripcion.setToolTip("Describa de forma breve y detallada\nla deuda de la persona")
self.text_edit_descripcion.setStyleSheet(style_text_edit)
self.label_monto_deuda = QLabel(self.frame_registro_nuevo)
self.label_monto_deuda.setGeometry(QRect(10,220,131,20))
self.label_monto_deuda.setText("Monto de deuda")
self.label_monto_deuda.setAlignment(Qt.AlignCenter)
self.label_monto_deuda.setStyleSheet(style_ultimo_registro)
self.line_edit_monto = QLineEdit(self.frame_registro_nuevo)
self.line_edit_monto.setGeometry(QRect(10,245,131,20))
self.line_edit_monto.setStyleSheet(style_line_edit)
self.line_edit_monto.setText("$")
self.line_edit_monto.setPlaceholderText("Ingresa aquí")
self.line_edit_monto.setToolTip("Ingresa el monto de\nla deuda")
# Inicio de la calculadora -------------------------
self.frame_calculator = QFrame(self)
self.frame_calculator.setGeometry(QRect(290,-1500,150,312))
self.frame_calculator.setStyleSheet(style_menu_calculator)
self.shadow2 = QGraphicsDropShadowEffect()
self.shadow2.setBlurRadius(22)
self.frame_calculator.setGraphicsEffect(self.shadow2)
self.display_calculator = QtWidgets.QLineEdit(self.frame_calculator)
self.display_calculator.setGeometry(QRect(5,10,140,50))
self.display_calculator.setStyleSheet(style_display_calculator)
self.display_calculator.setReadOnly(True)
self.display_calculator.setMaxLength(20)
self.display_calculator.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.shadow6 = QGraphicsDropShadowEffect()
self.shadow6.setBlurRadius(22)
self.display_calculator.setGraphicsEffect(self.shadow6)
self.button_back_calculator = QPushButton(self.frame_calculator)
self.button_back_calculator.setGeometry(QRect(25,250,25,25))
self.button_back_calculator.setStyleSheet(style_button_guardar)
self.button_back_calculator.setIcon(QIcon(":/Cancelar/img/Cancelar_rojo.png"))
self.button_back_calculator.setText("")
self.shadow3 = QGraphicsDropShadowEffect()
# self.shadow3.setColor(QColor(255, 13, 25, 160))
self.shadow3.setBlurRadius(36)
self.button_back_calculator.setGraphicsEffect(self.shadow3)
self.button_equal = QPushButton(self.frame_calculator)
self.button_equal.setGeometry(QRect(100,110,25,25))
self.button_equal.setStyleSheet(style_button_equal)
self.button_equal.setText("=")
self.button_equal.setFont(QtGui.QFont("Roboto", 15))
self.shadow4 = QGraphicsDropShadowEffect()
self.shadow4.setBlurRadius(36)
self.button_equal.setGraphicsEffect(self.shadow4)
self.button_equal2 = QPushButton(self.frame_calculator)
self.button_equal2.setGeometry(QRect(100,190,25,25))
self.button_equal2.setStyleSheet(style_button_equal)
self.button_equal2.setText("=")
self.button_equal2.setFont(QtGui.QFont("Roboto", 15))
self.shadow5 = QGraphicsDropShadowEffect()
self.shadow5.setBlurRadius(36)
self.button_equal2.setGraphicsEffect(self.shadow5)
self.label_dolares = QLabel(self.frame_calculator)
self.label_dolares.setGeometry(QRect(20,70,100,40))
self.label_dolares.setStyleSheet(style_labels_calculator)
self.label_dolares.setText("$ a Bs")
self.label_dolares.setFont(QtGui.QFont("Roboto", 12))
self.label_bolivares = QLabel(self.frame_calculator)
self.label_bolivares.setGeometry(QRect(20,150,100,40))
self.label_bolivares.setStyleSheet(style_labels_calculator)
self.label_bolivares.setText("Bs a $")
self.label_bolivares.setFont(QtGui.QFont("Roboto", 12))
self.line_dolares = QLineEdit(self.frame_calculator)
self.line_dolares.setGeometry(QRect(20,100,70,40))
self.line_dolares.setStyleSheet(style_display_calculator)
self.line_dolares.setValidator(QtGui.QDoubleValidator())
self.shadow7 = QGraphicsDropShadowEffect()
self.shadow7.setBlurRadius(22)
self.line_dolares.setGraphicsEffect(self.shadow7)
self.line_bolivares = QLineEdit(self.frame_calculator)
self.line_bolivares.setGeometry(QRect(20,180,70,40))
self.line_bolivares.setStyleSheet(style_display_calculator)
self.line_bolivares.setValidator(QtGui.QDoubleValidator())
self.shadow8 = QGraphicsDropShadowEffect()
self.shadow8.setBlurRadius(22)
self.line_bolivares.setGraphicsEffect(self.shadow8)
# Fin de la calculadora --------------------------------
#self.line_edit_monto.setValidator(QRegExpValidator(QRegExp("[0-9]+[,-.]"),self.line_edit_monto))
self.button_cancelar = QPushButton(self.frame_registro_nuevo)
self.button_cancelar.setGeometry(QRect(50,280,22,22))
self.button_cancelar.setStyleSheet(style_button_guardar)
self.button_cancelar.setIcon(QIcon(":/Cancelar/img/Cancelar_rojo.png"))
self.button_guardar = QPushButton(self.frame_registro_nuevo)
self.button_guardar.setGeometry(QRect(80,280,22,22))
self.button_guardar.setStyleSheet(style_button_guardar)
self.button_guardar.setIcon(QIcon(":/Check/img/Check_azul.png"))
##########################
#Visualizar deudor
self.frame_visualizar = QFrame(self)
self.frame_visualizar.setGeometry(QRect(290,140,0,0))
self.frame_visualizar.setStyleSheet(style_menu_dos)
self.frame_visualizar.setGraphicsEffect(self.shadow)
self.label_nombre_apellido_vz = QLabel(self.frame_visualizar)
self.label_nombre_apellido_vz.setGeometry(QRect(10,10,131,20))
self.label_nombre_apellido_vz.setText("Nombre y Apellido")
self.label_nombre_apellido_vz.setAlignment(Qt.AlignCenter)
self.label_nombre_apellido_vz.setStyleSheet(style_ultimo_registro)
self.line_edit_nombre_apellido_vz = QLineEdit(self.frame_visualizar)
self.line_edit_nombre_apellido_vz.setGeometry(QRect(10,35,131,20))
self.line_edit_nombre_apellido_vz.setStyleSheet(style_line_edit)
self.line_edit_nombre_apellido_vz.setPlaceholderText("Ingresa aquí")
self.line_edit_nombre_apellido_vz.setToolTip("Ingresa el nombre y apellido de\nla persona deudora")
self.label_descripcion_deuda_vz = QLabel(self.frame_visualizar)
self.label_descripcion_deuda_vz.setGeometry(QRect(10,70,131,20))
self.label_descripcion_deuda_vz.setText("Descripción de deuda")
self.label_descripcion_deuda_vz.setAlignment(Qt.AlignCenter)
self.label_descripcion_deuda_vz.setStyleSheet(style_ultimo_registro)
self.text_edit_descripcion_vz = QTextEdit(self.frame_visualizar)
self.text_edit_descripcion_vz.setGeometry(QRect(10,95,131,111))
self.text_edit_descripcion_vz.setPlaceholderText("Ingrese la descrición aquí")
self.text_edit_descripcion_vz.setToolTip("Describa de forma breve y detallada\nla deuda de la persona")
self.text_edit_descripcion_vz.setStyleSheet(style_text_edit)
self.label_monto_deuda_vz = QLabel(self.frame_visualizar)
self.label_monto_deuda_vz.setGeometry(QRect(10,220,131,20))
self.label_monto_deuda_vz.setText("Monto de deuda")
self.label_monto_deuda_vz.setAlignment(Qt.AlignCenter)
self.label_monto_deuda_vz.setStyleSheet(style_ultimo_registro)
self.line_edit_monto_vz = QLineEdit(self.frame_visualizar)
self.line_edit_monto_vz.setGeometry(QRect(10,245,131,20))
self.line_edit_monto_vz.setStyleSheet(style_line_edit)
self.line_edit_monto_vz.setPlaceholderText("Ingresa aquí")
self.line_edit_monto_vz.setToolTip("Ingresa el monto de\nla deuda")
#self.line_edit_monto_vz.setValidator(QRegExpValidator(QRegExp("[0-9]+"),self.line_edit_monto))
self.button_cancelar_vz = QPushButton(self.frame_visualizar)
self.button_cancelar_vz.setGeometry(QRect(50,280,22,22))
self.button_cancelar_vz.setStyleSheet(style_button_guardar)
self.button_cancelar_vz.setIcon(QIcon(":/Cancelar/img/Cancelar_rojo.png"))
self.button_guardar_vz = QPushButton(self.frame_visualizar)
self.button_guardar_vz.setGeometry(QRect(80,280,22,22))
self.button_guardar_vz.setStyleSheet(style_button_guardar)
self.button_guardar_vz.setIcon(QIcon(":/Check/img/Check_azul.png"))
##################
##############################################################
#Eventos click
self.line_edit_buscar.returnPressed.connect(self.buscar_datos)
self.button_buscar.clicked.connect(self.buscar_datos)
self.button_agregar.clicked.connect(self.mostrar_agregar)
self.button_eliminar.clicked.connect(self.eliminar_datos)
self.button_cancelar.clicked.connect(self.funtion_cancelar)
self.button_guardar.clicked.connect(self.Creater_base_datos)
self.button_guardar.clicked.connect(self.insert_datos_db)
self.button_actualizar.clicked.connect(self.mostrar_datos)
self.button_buscar_por.clicked.connect(self.visualizar_calculadora)
self.button_actualizar_dolar.clicked.connect(self.valor_dolar)
self.button_actualizar_dolar.clicked.connect(self.Precio_productos)
self.button_cancelar_vz.clicked.connect(self.funtion_cancelar_vz)
self.button_guardar_vz.clicked.connect(self.Update_datos)
# Calculator
self.button_back_calculator.clicked.connect(self.ocultar_calculadora)
self.button_equal.clicked.connect(self.evaluacion)
self.button_equal2.clicked.connect(self.evaluacion2)
##############
def aun_no(self):
QMessageBox.critical(self, "Upps!", "Aún no se ha agregado ninguna funcionalidad a este boton!.",
QMessageBox.Ok)
def valor_dolar(self):
try:
resp = requests.get('https://s3.amazonaws.com/dolartoday/data.json',timeout = 3)
print("Connected")
a = json.loads(resp.text)
usd = a['USD']
dolar = usd['transferencia']
self.label_ultimo_registro.setText("Valor del dolar actual: 1$ = "+str(dolar)+"Bs")
self.Precio_productos(dolar)
except exceptions.ConnectionError:
QMessageBox.critical(self, "Error de conexión", "Error al conectar con DolarToday vuelva a cargar o \nComprueba tu conexión a internet.",
QMessageBox.Ok)
self.label_copias.setText('Copias: No conectó ')
self.label_impresiones.setText('Impresiones: No conectó ')
self.label_internet.setText('Uso de internet: No conectó')
self.label_ultimo_registro.setText("Valor del dolar actual: 1$ = No conectó")
print("Not connected")
def Precio_productos(self,dolar):
if dolar:
#print("AAAA",dolar)
copias = 0.17
impresiones = 0.18
internet = 0.16
precio_copias = round(dolar*copias,2)
precio_impresiones = round(dolar*impresiones,2)
precio_internet = round(dolar*internet,2)
#r_1 = round(precio_copias,2)
#print("El r",precio_copias)
#r_2 = precio_impresiones
#r_3 = precio_internet
self.label_copias.setText('Copias: '+str(precio_copias)+"Bs")
self.label_impresiones.setText('Impresiones: '+str(precio_impresiones)+"Bs")
self.label_internet.setText('Uso de internet: '+str(precio_internet)+"Bs")
def Creater_base_datos(self):
if not QFile.exists("Base de datos"):
makedirs("Base de datos")
if QFile.exists("Base de datos"):
if QFile.exists('Base de datos/DB_DEUDORES.db'):
None
else:
try:
with sqlite3.connect('Base de datos/DB_DEUDORES.db') as db:
cursor = db.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS USUARIOS_DEUDORES (ID INTEGER PRIMARY KEY, NOMBRE_APELLIDO TEXT,"
"DESCRIPCION_DEUDA TEXT, MONTO TEXT, FECHA TEXT, HORA TEXT)")
db.commit()
cursor.close()
db.close()
except Exception as e:
print(e)
QMessageBox.critical(self, "Nuevo registro", "Error desconocido.",
QMessageBox.Ok)
else:
None
def insert_datos_db(self):
nombre_apellido = self.line_edit_nombre_apellido.text()
descripcion_deuda = self.text_edit_descripcion.toPlainText()
monto_deudor = self.line_edit_monto.text()
if not nombre_apellido:
self.line_edit_nombre_apellido.setFocus()
elif not descripcion_deuda:
self.text_edit_descripcion.setFocus()
elif not monto_deudor:
self.line_edit_monto.setFocus()
else:
if QFile.exists("Base de datos/DB_DEUDORES.db"):
conexion = sqlite3.connect('Base de datos/DB_DEUDORES.db')
cursor = conexion.cursor()
try:
# Time values inserted into the database
hora = time.strftime("%I:%M:%S %p")
fecha_actual = time.strftime("%d/%m/%y")
datos_insertar = [nombre_apellido, descripcion_deuda,monto_deudor,
fecha_actual, hora]
cursor.execute("INSERT INTO USUARIOS_DEUDORES (NOMBRE_APELLIDO,"
"DESCRIPCION_DEUDA, MONTO, FECHA, HORA)"
"VALUES(?,?,?,?,?)",datos_insertar)
conexion.commit()
cursor.close()
conexion.close()
QMessageBox.information(self, "Nuevo deudor", "Deudor registrado.",QMessageBox.Ok)
self.line_edit_nombre_apellido.clear()
self.text_edit_descripcion.clear()
self.line_edit_monto.clear()
self.ocultar_agregar()
except Exception as e:
print(e)
QMessageBox.critical(self, "Nuevo deudor", "Error desconocido.",
QMessageBox.Ok)
else:
None
def Update_datos(self):
nombre_apellido_vz = self.line_edit_nombre_apellido_vz.text()
descripcion_deuda_vz = self.text_edit_descripcion_vz.toPlainText()
monto_deudor_vz = self.line_edit_monto_vz.text()
if QFile.exists("Base de datos/DB_DEUDORES.db"):
conexion = sqlite3.connect('Base de datos/DB_DEUDORES.db')
cursor = conexion.cursor()
try:
hora_vz = time.strftime("%I:%M:%S %p")
fecha_actual_vz = time.strftime("%d/%m/%y")
datos_insertar = [nombre_apellido_vz, descripcion_deuda_vz, monto_deudor_vz,
fecha_actual_vz, hora_vz, self.datos[0]]
print("Estos son los datos:",datos_insertar)
cursor.execute("UPDATE USUARIOS_DEUDORES SET NOMBRE_APELLIDO = ?, DESCRIPCION_DEUDA = ?,"
"MONTO = ?, FECHA = ?, HORA = ? WHERE ID = ?", datos_insertar)
conexion.commit()
cursor.close()
conexion.close()
QMessageBox.information(self, "Actualización de deudor", "Deudor actualizado.",QMessageBox.Ok)
except Exception as e:
print(e)
QMessageBox.critical(self, "Actualización de deudor", "Error desconocido.",
QMessageBox.Ok)
self.line_edit_nombre_apellido_vz.clear()
self.text_edit_descripcion_vz.clear()
self.line_edit_monto_vz.clear()
self.ocultar_visualizar()
def buscar_datos(self):
try:
widget = self.sender().objectName()
if widget in ("Enter", "Buscar"):
cliente = " ".join(self.line_edit_buscar.text().split()).lower()
if len(cliente)== 0:
QMessageBox.critical(self, "Error", "No se ha escrito nada",
QMessageBox.Ok)
if cliente:
sql = "SELECT ID, NOMBRE_APELLIDO,DESCRIPCION_DEUDA, MONTO, FECHA, HORA FROM USUARIOS_DEUDORES WHERE NOMBRE_APELLIDO LIKE ?", ("%"+cliente+"%",)
else:
self.line_edit_buscar.setFocus()
return
else:
self.line_edit_buscar.clear()
sql = "SELECT * FROM USUARIOS_DEUDORES"
if QFile.exists('Base de datos/DB_DEUDORES.db'):
conexion = sqlite3.connect('Base de datos/DB_DEUDORES.db')
cursor = conexion.cursor()
print("Si")
try:
if widget in ("Enter", "Buscar"):
cursor.execute(sql[0], sql[1])
else:
cursor.execute(sql)
datosDevueltos = cursor.fetchall()
conexion.close()
self.Tabla_registro.clearContents()
self.Tabla_registro.setRowCount(0)
if datosDevueltos:
fila = 0
for datos in datosDevueltos:
self.Tabla_registro.setRowCount(fila + 1)
idDato = QTableWidgetItem(str(datos[0]))
idDato.setTextAlignment(Qt.AlignCenter)
self.Tabla_registro.setItem(fila, 0, idDato)
self.Tabla_registro.setItem(fila, 1, QTableWidgetItem(datos[1]))
self.Tabla_registro.setItem(fila, 2, QTableWidgetItem(datos[2]))
self.Tabla_registro.setItem(fila, 3, QTableWidgetItem(datos[3]))
self.Tabla_registro.setItem(fila, 4, QTableWidgetItem(datos[4]))
self.Tabla_registro.setItem(fila, 5, QTableWidgetItem(datos[5]))
fila += 1
else:
QMessageBox.information(self, "Buscar deudor", "No se encontró ", QMessageBox.Ok)
except Exception as e:
print(e)
conexion.close()
QMessageBox.critical(self, "Buscar deudor", "Error desconocido.",
QMessageBox.Ok)
else:
QMessageBox.critical(self, "Buscar deudor", "No se encontró la base de datos.",
QMessageBox.Ok)
self.line_edit_buscar.setFocus()
except AttributeError:
pass
def eliminar_datos(self):
if QFile.exists("Base de datos/DB_DEUDORES.db"):
msg = QMessageBox()
#msg.setWindowIcon(QIcon('Imagenes-iconos/Icono_window.png'))
msg.setText("¿Está seguro de querer eliminar este deudor?")
msg.setIcon(QMessageBox.Question)
msg.setWindowTitle("Eliminar Usuario")
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
button_si = msg.button(QMessageBox.Yes)
button_si.setText("Si")
button_si.setIcon(QIcon("I:/Check/img/Check_azul.png"))
button_si.setIconSize(QSize(13,13))
button_si.setStyleSheet("QPushButton:hover{background:rgb(0, 170, 255);}\n"
"QPushButton{background:#343a40;\n"
"}")
button_no = msg.button(QMessageBox.No)
button_no.setIcon(QIcon(":/Cancelar/img/Cancelar_rojo.png"))
button_no.setIconSize(QSize(13,13))
button_no.setStyleSheet("QPushButton:hover{background:rgb(0, 170, 255);}\n"
"QPushButton{background:#343a40;}")
msg.setStyleSheet("\n"
"color:#ffffff;\n"
"font-size:12px;\n"
"background-color:#12191D;")
if (msg.exec_() == QMessageBox.Yes):
try:
self.con = sqlite3.connect("Base de datos/DB_DEUDORES.db")
self.cursor = self.con.cursor()
self.Tabla_registro.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
ID = self.Tabla_registro.selectedIndexes()[0].data()
print("has clickeado en", ID)
# First step: delete the record from the database
query = 'DELETE FROM USUARIOS_DEUDORES WHERE ID =?'
self.cursor.execute(query, (ID,))
self.con.commit()
# Select the row
self.Seleccion = self.Tabla_registro.selectedItems()
# Remove the selected row from the table
self.Tabla_registro.removeRow(self.Tabla_registro.currentRow())
except Exception as e:
print(e)
QMessageBox.critical(self, "Error", "No existen deudores para eliminar",
QMessageBox.Ok)
else:
pass
else:
QMessageBox.critical(self, "Eliminar", "No se encontró la base de datos. ",
QMessageBox.Ok)
def mostrar_datos(self):
if QFile.exists("Base de datos/DB_DEUDORES.db"):
try:
self.con = sqlite3.connect("Base de datos/DB_DEUDORES.db")
self.cursor = self.con.cursor()
self.cursor.execute("SELECT ID, NOMBRE_APELLIDO, DESCRIPCION_DEUDA, MONTO, FECHA,HORA FROM USUARIOS_DEUDORES ORDER BY ID")
datos_Devueltos = self.cursor.fetchall()
self.Tabla_registro.clearContents()
self.Tabla_registro.setRowCount(0)
print(datos_Devueltos)
if datos_Devueltos:
row = 0
for datos in datos_Devueltos:
self.Tabla_registro.setRowCount(row + 1)
idDato = QTableWidgetItem(str(datos[0]))
idDato.setTextAlignment(Qt.AlignCenter)
self.Tabla_registro.setItem(row, 0, idDato)
self.Tabla_registro.setItem(row, 1, QTableWidgetItem(datos[1]))
self.Tabla_registro.setItem(row, 2, QTableWidgetItem(datos[2]))
self.Tabla_registro.setItem(row, 3, QTableWidgetItem(datos[3]))
self.Tabla_registro.setItem(row, 4, QTableWidgetItem(datos[4]))
self.Tabla_registro.setItem(row, 5, QTableWidgetItem(datos[5]))
row +=1
else:
QMessageBox.information(self, "Buscar deudor", "No se encontraron deudores", QMessageBox.Ok)
except Exception as e:
print(e)
QMessageBox.critical(self, "Error", "No se ha podido conectar a la base de datos o no existe la base de datos",
QMessageBox.Ok)
else:
QMessageBox.critical(self, "Buscar deudores", "No se encontro la base de datos.",
QMessageBox.Ok)
def Item_click(self,celda):
celda = self.Tabla_registro.selectedItems()
if celda:
indice = celda[0].row()
dato = [self.Tabla_registro.item(indice, i).text() for i in range(5)]
dato_buscar = dato[0]
if dato_buscar:
sql = "SELECT * FROM USUARIOS_DEUDORES WHERE ID LIKE ?", (dato_buscar,)
print("Si")
else:
print("NO")
if QFile.exists("Base de datos/DB_DEUDORES.db"):
conexion = sqlite3.connect("Base de datos/DB_DEUDORES.db")
cursor = conexion.cursor()
try:
cursor.execute(sql[0],sql[1])
datosdevueltos = cursor.fetchall()
for dato in datosdevueltos:
indice = dato[0]
self.mostrar_visualizar(dato)
self.datos = dato
conexion.close()
except Exception as e:
print("A1:",e)
else:
print("Error")
def dolar_value(self):
# try:
resp = requests.get('https://s3.amazonaws.com/dolartoday/data.json',timeout = 3)
a = json.loads(resp.text)
usd = a['USD']
dolar = usd['transferencia']
# except exceptions.ConnectionError:
# QMessageBox.critical(self, "Error de conexión", "Error al conectar con DolarToday vuelva a cargar o \nComprueba tu conexión a internet.",
# QMessageBox.Ok)
return dolar
def visualizar_calculadora(self):
self.animacionMostar = QPropertyAnimation(self.frame_calculator,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_calculator))
self.animacionMostar.setDuration(900)
self.animacionMostar.setStartValue(QRect(290,1500,150,312))
self.animacionMostar.setEndValue(QRect(290,150,150,312))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
def ocultar_calculadora(self):
self.animacionMostar = QPropertyAnimation(self.frame_calculator,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_calculator))
self.animacionMostar.setDuration(200)
self.animacionMostar.setStartValue(QRect(290, 140, 151, 312))
self.animacionMostar.setEndValue(QRect(340, 250, 0, 0))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
self.line_dolares.clear()
self.line_bolivares.clear()
self.display_calculator.clear()
def evaluacion(self):
try:
dolares = int(self.line_dolares.text())
dolar = int(self.dolar_value())
resultado = dolares * dolar
resultado = str(resultado)
self.display_calculator.setText(resultado + " Bs")
except ValueError:
self.display_calculator.setText("SINTAXIS ERROR")
def evaluacion2(self):
try:
bolivares = int(self.line_bolivares.text())
dolar = int(self.dolar_value())
resultado = bolivares / dolar
resultado = str(resultado)
self.display_calculator.setText(resultado + " $")
except ValueError:
self.display_calculator.setText("SINTAXIS ERROR")
def visualizar_dudor(self):
self.animacionMostar = QPropertyAnimation(self.frame_registro_nuevo,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_registro_nuevo))
self.animacionMostar.setDuration(500)
self.animacionMostar.setStartValue(QRect(-1500, 160, 151,312))
self.animacionMostar.setEndValue(QRect(90, 160, 151, 312))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
# Function for the add-new-debtor window
def funtion_cancelar(self):
msg = QMessageBox()
msg.setText("¿Estás seguro de que desea cancelar?")
msg.setIcon(QMessageBox.Question)
msg.setWindowTitle("Cancelar registro")
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
button_si = msg.button(QMessageBox.Yes)
button_si.setText("Si")
button_si.setIcon(QIcon(":/Check/img/Check_azul.png"))
button_si.setIconSize(QSize(13,13))
button_si.setStyleSheet("QPushButton:hover{background:rgb(0, 170, 255);}\n"
"QPushButton{background:#343a40;\n"
"}")
button_no = msg.button(QMessageBox.No)
button_no.setIcon(QIcon(":/Cancelar/img/Cancelar_rojo.png"))
button_no.setIconSize(QSize(13,13))
button_no.setStyleSheet("QPushButton:hover{background:rgb(0, 170, 255);}\n"
"QPushButton{background:#343a40;}")
msg.setStyleSheet("\n"
"color:#ffffff;\n"
"font-size:12px;\n"
"background-color:#12191D;")
if (msg.exec_() == QMessageBox.Yes):
self.ocultar_agregar()
else:
pass
def mostrar_agregar(self):
self.animacionMostar = QPropertyAnimation(self.frame_registro_nuevo,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_registro_nuevo))
self.animacionMostar.setDuration(500)
self.animacionMostar.setStartValue(QRect(-1500, 160, 151,312))
self.animacionMostar.setEndValue(QRect(90, 160, 151, 312))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
def ocultar_agregar(self):
self.animacionMostar = QPropertyAnimation(self.frame_registro_nuevo,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_registro_nuevo))
self.animacionMostar.setDuration(500)
self.animacionMostar.setStartValue(QRect(90, 160, 151, 312))
self.animacionMostar.setEndValue(QRect(-1500, 160, 151, 312))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
def funtion_cancelar_vz(self):
msg = QMessageBox()
msg.setText("¿Estás seguro de que desea cancelar?")
msg.setIcon(QMessageBox.Question)
msg.setWindowTitle("Cancelar registro")
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
button_si = msg.button(QMessageBox.Yes)
button_si.setText("Si")
button_si.setIcon(QIcon(":/Check/img/Check_azul.png"))
button_si.setIconSize(QSize(13,13))
button_si.setStyleSheet("QPushButton:hover{background:rgb(0, 170, 255);}\n"
"QPushButton{background:#343a40;\n"
"}")
button_no = msg.button(QMessageBox.No)
button_no.setIcon(QIcon(":/Cancelar/img/Cancelar_rojo.png"))
button_no.setIconSize(QSize(13,13))
button_no.setStyleSheet("QPushButton:hover{background:rgb(0, 170, 255);}\n"
"QPushButton{background:#343a40;}")
msg.setStyleSheet("\n"
"color:#ffffff;\n"
"font-size:12px;\n"
"background-color:#12191D;")
if (msg.exec_() == QMessageBox.Yes):
self.ocultar_visualizar()
else:
pass
def mostrar_visualizar(self,dato):
datos = dato
self.line_edit_nombre_apellido_vz.setText(str(datos[1]))
self.text_edit_descripcion_vz.setText(str(datos[2]))
self.line_edit_monto_vz.setText(str(datos[3]))
self.animacionMostar = QPropertyAnimation(self.frame_visualizar,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_visualizar))
self.animacionMostar.setDuration(200)
self.animacionMostar.setStartValue(QRect(340, 250, 0,0))
self.animacionMostar.setEndValue(QRect(290, 140, 151, 312))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
def ocultar_visualizar(self):
self.animacionMostar = QPropertyAnimation(self.frame_visualizar,b"geometry")
self.animacionMostar.finished.connect(lambda: (self.frame_visualizar))
self.animacionMostar.setDuration(200)
self.animacionMostar.setStartValue(QRect(290, 140, 151, 312))
self.animacionMostar.setEndValue(QRect(340, 250, 0, 0))
self.animacionMostar.start(QAbstractAnimation.DeleteWhenStopped)
if __name__ == '__main__':
app = QApplication(sys.argv)
Window = main()
Window.show()
app.exec_()
``` |
{
"source": "JoseSM1598/NeuralNetwork_Impl",
"score": 3
} |
#### File: NeuralNetwork_Impl/models/ConvolutinalNeuralNet.py
```python
import numpy as np
def tensorize(x):
return np.squeeze(np.asfarray(x))
# Define a Layer Parent class. Will be used by Relu Layers, SoftMax Layers, and FC Layers
class Layer:
def __init__(self, parms, f, dfdx):
self.parms = [tensorize(p) for p in parms]
self.f = f
self.dfdx = dfdx
self.x = None
def reset(self, r=None):
self.x = None
def getWeights(self):
if len(self.parms) == 0:
return []
else:
return np.concatenate([p.flatten() for p in self.parms])
def setWeights(self, w):
if len(w) > 0:
w = tensorize(w)
for k in range(len(self.parms)):
s = self.parms[k].shape
n = 1 if len(s) == 0 else np.prod(s)
self.parms[k] = np.reshape(w[:n], s)
w = w[n:]
def dfdw(self):
assert self.x is not None, 'dfdw called before f'
return np.empty((len(self.x), 0))
# Implement the FC Layer. FC Layers looks at what high level features
# most strongly correlate to a particular class and has particular weights
class FCLayer(Layer):
def __init__(self, V, b):
V, b = tensorize(V), tensorize(b)
def f(x):
self.x = tensorize(x)
return np.dot(self.parms[0], self.x) + self.parms[1]
def dfdx():
assert self.x is not None, 'dfdx called before f'
return self.parms[0]
Layer.__init__(self, [V, b], f, dfdx)
def dfdw(self):
assert self.x is not None, 'dfdw called before f'
m, n = self.parms[0].shape
D = np.zeros((m, m * (n + 1)))
js, je = 0, n
for i in range(m):
D[i][js:je] = self.x
js, je = js + n, je + n
D[:, (m * n):] = np.diag(np.ones(m))
return D
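# Note on the layout built in dfdw above: row i of D holds x in columns
# [i*n, (i+1)*n) (the gradient of output i w.r.t. the i-th row of V), and the
# last m columns form an identity block for the bias b, matching the flattening
# order used by getWeights().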
def __initialWeights(m, n, r=None):
if r is None:
r = np.sqrt(2 / m) # Formula by He et al.
V = np.random.randn(n, m) * r
b = np.zeros(n)
return V, b
@classmethod
def ofShape(cls, m, n, r=None):
V, b = FCLayer.__initialWeights(m, n, r)
return cls(V, b)
def reset(self, r=None):
self.x = None
n, m = self.parms[0].shape
V, b = FCLayer.__initialWeights(m, n, r)
self.parms = [V, b]
# Implement the Relu Layer to be used in the hidden layers
class ReLULayer(Layer):
def __init__(self):
def f(x):
self.x = tensorize(x)
return (np.maximum(0, self.x))
def dfdx():
assert self.x is not None, 'dfdx called before f'
x_arr = np.atleast_1d(self.x) # Turn our x into an array if not already
e = x_arr.size
J = np.zeros([e, e])
diagonal = np.array([0.5 * (1 + np.sign(x_arr))])
if diagonal.shape == (1, 1):
return (diagonal[0])
return (J + np.diag(diagonal[0]))
Layer.__init__(self, [], f, dfdx)
# Define SoftMax Layer, which is used at the end of the network
class SoftmaxLayer(Layer):
def __softmax(x):
e = np.exp(x - np.max(x))
return e / np.sum(e)
def __init__(self, n):
def f(x):
self.x = tensorize(x)
return SoftmaxLayer.__softmax(self.x)
def dfdx():
assert self.x is not None, 'dfdx called before f'
s = SoftmaxLayer.__softmax(self.x)
diagonal = np.diag(np.array([s])[0])
return (np.subtract(diagonal, np.outer(s, s)))
Layer.__init__(self, [], f, dfdx)
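# For reference: the Jacobian computed in dfdx above is J = diag(s) - s s^T,
# where s = softmax(x).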
# Define the Loss Function. This class defines the *Cross Entropy* Loss function as well as the Jacobian of
# The cross entropy loss
class Loss:
def __init__(self):
self.small = 1e-8
def f(self, y, p):
self.p = tensorize(p)
py = self.p[int(y)]
if py < self.small: py = self.small
return (-np.log(py))
def dfdx(self, y):
assert self.p is not None, 'dfdx called before f'
y = int(y)
d = np.zeros(len(self.p))
py = self.p[y]
if py < self.small: py = self.small
for i in range(len(self.p)):
if (i == y):
d[i] = -1 / (py)
return (d)
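# For reference: with cross-entropy L = -log(p[y]), the only nonzero entry of
# dL/dp is dL/dp[y] = -1/p[y]; dfdx above builds exactly that vector, with p[y]
# clipped at `small` to avoid division by zero.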
# Lastly, define the NEtwork Class! This class takes parameters that tells it how many FC and Relu layers to
# add, as well as defining the input and output layers
# GetWeights() -> Returns the weights of each layer
# SetWeights() -> Sets weights for each layer
# f() -> Returns the activation function of each layer
# backprop() -> Performs backpropoagation to train the network
class Network:
def __init__(self, sizes):
self.layers = []
for i in range(len(sizes) - 1):
self.layers.append(FCLayer.ofShape(sizes[i], sizes[i + 1]))
self.layers.append(ReLULayer())
self.layers.append(SoftmaxLayer(sizes[-1]))
self.p = None
def reset(self, r=None):
for layer in self.layers: layer.reset(r)
self.p = None
def getWeights(self):
return np.concatenate([layer.getWeights() for layer in self.layers])
def setWeights(self, w):
for layer in self.layers:
n = len(layer.getWeights())
layer.setWeights(w[:n])
w = w[n:]
def f(self, x):
x = tensorize(x)
for layer in self.layers: x = layer.f(x)
self.p = x
return self.p
def backprop(self, x, y, loss):
L = loss.f(y, self.f(x))
g = loss.dfdx(y)
g = g.reshape((1, len(g)))
deltan_stor = np.empty((0,)) # Storage for the nth sample to the loss gradient
for k in range(len(self.layers) - 1, -1, -1):
delta_n = np.dot(g, self.layers[k].dfdw())
if delta_n.size > 0:
deltan_stor = np.concatenate((deltan_stor, delta_n[0]))
g = np.dot(g, self.layers[k].dfdx()) # delta of loss with respect to x(k-1)
return ((L, deltan_stor))
# ReadArray Method to read our data in
def readArray(filename):
with open(filename, 'r') as file:
X = np.array([[float(a) for a in line.strip().split()] for line in file])
return X
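# --- Illustrative usage sketch (added for clarity; not part of the original
# project; the layer sizes, sample input, and label below are made-up assumptions) ---
if __name__ == '__main__':
    np.random.seed(0)
    net = Network([4, 8, 3])        # FC(4->8) + ReLU + FC(8->3) + ReLU + Softmax
    loss_fn = Loss()
    x_sample = np.random.randn(4)   # one feature vector
    y_sample = 1                    # its assumed class label
    probs = net.f(x_sample)         # class probabilities from the softmax layer
    L, grad = net.backprop(x_sample, y_sample, loss_fn)
    # backprop concatenates per-layer gradients from the last layer to the first,
    # so `grad` is ordered in reverse relative to net.getWeights()
    print('probs:', probs, 'loss:', L, 'gradient entries:', grad.size)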
``` |
{
"source": "JoseSM1598/Personal_Website",
"score": 3
} |
#### File: public/pipeline/log_generator.py
```python
from faker import Faker
from datetime import datetime
import random
import time
#LINE = """\
#{remote_addr} - - [{time_local} +0000] "{request_type} {request_path} HTTP/1.1" {status} {body_bytes_sent} "{http_referer}" "{http_user_agent}"\
#"""
LINE = """\
{first_name} {last_name} - - [{entry_date} +0000] {occupation} {origin_country} {current_residence}\
"""
LOG_FILE_A = "public/pipeline/log_a.txt"
LOG_FILE_B = "public/pipeline/log_b.txt"
#LOG_FILE_A = "log_a.txt"
#LOG_FILE_B = "log_b.txt"
LOG_MAX = 100
def generate_log_line():
fake = Faker()
now = datetime.now()
time_local = now.strftime('%d/%b/%Y:%H:%M:%S')
first_name = fake.first_name().replace(" ", ".")
last_name = fake.last_name().replace(" ", ".")
occupation = fake.job().replace(" ", ".")
origin_country = fake.country().replace(" ", ".")
current_residence = fake.state().replace(" ", ".")
log_line = LINE.format(
first_name = first_name,
last_name = last_name,
entry_date=time_local,
occupation = occupation,
origin_country = origin_country,
current_residence = current_residence
)
return log_line
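# Example of one generated line (illustrative values only; spaces inside each
# field are replaced by dots, as done above):
#   Jane Doe - - [07/Jun/2021:14:02:11 +0000] Data.Scientist United.Kingdom New.York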
def write_log_line(log_file, line):
with open(log_file, "a") as f:
f.write(line)
f.write("\n")
def clear_log_file(log_file):
with open(log_file, "w+") as f:
f.write("")
if __name__ == "__main__":
current_log_file = LOG_FILE_A
lines_written = 0
clear_log_file(LOG_FILE_A)
clear_log_file(LOG_FILE_B)
while True:
line = generate_log_line()
write_log_line(current_log_file, line)
lines_written += 1
if lines_written % LOG_MAX == 0:
new_log_file = LOG_FILE_B
if current_log_file == LOG_FILE_B:
new_log_file = LOG_FILE_A
clear_log_file(new_log_file)
current_log_file = new_log_file
sleep_time = random.choice(range(1, 5, 1))
time.sleep(sleep_time)
``` |
{
"source": "JoseSpx/ApiConsultaSunatRUC",
"score": 2
} |
#### File: app/routes/sunat.py
```python
from flask import Blueprint, jsonify, request
from app.helpers.tesseract import read_text_from_image_sunat
from app.helpers.sunatinfo import read_info, convert_sunat_obj
from app import app
SunatRoutes = Blueprint('sunat', __name__)
@SunatRoutes.route('/', methods=['GET'])
def consult_sunat_info():
url_image = app.config['SUNAT_URL_IMG']
ruc = request.args.get('ruc')
if not ruc:
response = jsonify({'message': 'RUC no enviado'})
response.status_code = 400
return response
ruc = ruc.strip()
if len(ruc) != 11:
response = jsonify({'message': 'RUC no válido'})
response.status_code = 400
return response
if ruc[0] != '1' and ruc[0] != '2':
response = jsonify({'message': 'RUC no válido'})
response.status_code = 400
return response
table_info = None
attempts = 0
max_attempts = 50
while not table_info and attempts <= max_attempts:
img_text, cookies = read_text_from_image_sunat(url_image)
try:
table_info = read_info(img_text, ruc, cookies)
except BaseException:
raise
attempts = attempts + 1
if not table_info:
response = jsonify({'message': 'Ocurrio un Error'})
response.status_code = 400
return response
sunat_obj = convert_sunat_obj(table_info, ruc)
response = jsonify(sunat_obj.serialize())
response.status_code = 200
return response
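# Example request (illustrative): GET /?ruc=<11-digit RUC starting with 1 or 2>
# responds with the serialized SUNAT record as JSON, or HTTP 400 with an error message.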
``` |
{
"source": "jose-sv/hogwild_pytorch",
"score": 3
} |
#### File: jose-sv/hogwild_pytorch/compress.py
```python
import os
from tqdm import tqdm
def get_directories():
"""Find and process directories
Returns a list of directories matching the criteria"""
cmd = os.popen(r"ls -l -h | rg -e 'Jun\s+7' -e 'Jun\s+6' | \
rg -e 'sim.*hogwild' | sed -e 's|^.* ||'")
return cmd.read().splitlines()
FNAMES = ['conf.{}'.format(i) for i in range(10)]
FNAMES.append('eval')
def compress(dirs):
"""Iterate over files and compress them"""
with tqdm(unit="Files", total=len(dirs)*11) as pbar:
for cdir in dirs:
for cfile in FNAMES:
if not os.path.exists("{}/{}".format(cdir, cfile)):
cmd = "cd {}; gzip {}".format(cdir, cfile)
output = os.popen(cmd)
for line in output.read().splitlines():
print(line)
pbar.update(1)
def copy(dirs):
"""Iterate over output directories and copy them to shared space"""
for cdir in tqdm(dirs, unit="Output Directories", total=len(dirs)):
cmd = "cp -r /scratch/{} /shared/jose/pytorch/outputs/".format(cdir)
output = os.popen(cmd)
for line in output.read().splitlines():
print(line)
```
#### File: jose-sv/hogwild_pytorch/gen_averages.py
```python
import logging
import argparse
import os
import glob
import csv
from multiprocessing import Pool
from functools import partial
import netstats
def get_runs(runinfo):
return []
if __name__ == '__main__':
FORMAT = '%(message)s [%(levelno)s-%(asctime)s %(module)s:%(funcName)s]'
logging.basicConfig(level=logging.DEBUG, format=FORMAT,
handlers=[logging.StreamHandler()])
parser = argparse.ArgumentParser(description='Process training logs for '
'prediction rate and tolerance plotting')
parser.add_argument('runpath', type=str,
help='Path to the first run. Run info will be '
'extracted from here')
parser.add_argument('--tmp-dir', default='/tmp', type=str,
help='Directory to put temp files in')
args = parser.parse_args()
print(args)
assert(os.path.exists(args.runpath)), f'{args.runpath} not found'
path = '/'.join(args.runpath.split('/')[:-1])
parsed_name = args.runpath.split('/')[-1].split('_')
atk_type = parsed_name[0]
# TODO flavor type
optim_type = parsed_name[1]
batch_size = int(parsed_name[2])
run_info = parsed_name[3].split('-')
atk_batches = int(run_info[0])
target_lbl = int(run_info[1])
bias = float(run_info[2])
step = int(run_info[3].split('.')[0]) # remove tar gz
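# The run file name is therefore expected to look like (illustrative example):
#   <atk_type>_<optim>_<batch_size>_<atk_batches>-<target>-<bias>-<step>.tar.gz
#   e.g. lr_sgd_128_1-3-0.2-340.tar.gz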
config_str = f'Type: {atk_type}\n' \
f'Optim: {optim_type}\n' \
f'batch_size: {batch_size}\n' \
f'atk_batches: {atk_batches}\n' \
f'target_lbl: {target_lbl}\n' \
f'bias: {bias}\n' \
f'step: {step}\n'
logging.debug(config_str)
out_fname = f'{atk_type}_{optim_type}_{batch_size}_{atk_batches}' \
f'_{target_lbl}_{bias}'
pattern = f'{path}/{atk_type}_{optim_type}_{batch_size}_{atk_batches}' \
f'-{target_lbl}-{bias}*-*.tar.gz'
matching_files = glob.glob(pattern)
logging.debug('Found: %s', matching_files)
# load and compute all stats
stats_func = partial(netstats.get_all_stats, target_lbl)
mp_pool = Pool(5)
stats = mp_pool.map(stats_func, matching_files)
logging.debug('---loaded and processed; averaging---')
# average stats
avg_counts = {} # allow for different number of entries at each step
avg_pred_rates = {}
avg_val_acc = {}
avg_tol2any = {}
avg_tol2tar = {}
avgs = {'pred_rates': [0]*10, 'val_acc': 0, 'tol2any': [0]*10,
'tol2tar': [0]*10}
for stat in list(stats): # for each stat
for step in stat['pred_rates']:
if step in avg_counts:
avg_counts[step] += 1 # each step shows up once per label
avg_pred_rates[step] = [sum(x) for x in
zip(stat['pred_rates'][step],
avg_pred_rates[step])]
# avg_tol2any[step] = [sum(x) for x in
# zip(stat['tol2any'][step],
# avg_tol2any[step])]
# avg_tol2tar[step] = [sum(x) for x in
# zip(stat['tol2tar'][step],
# avg_tol2tar[step])]
avg_val_acc[step] += stat['val_acc'][step]
logging.debug('val acc %i: %.4f', step, stat['val_acc'][step])
else:
avg_counts[step] = 1 # first label with this step
avg_pred_rates[step] = stat['pred_rates'][step]
# avg_tol2any[step] = stat['tol2any'][step]
# avg_tol2tar[step] = stat['tol2tar'][step]
avg_val_acc[step] = stat['val_acc'][step]
for step in avg_counts:
avg_val_acc[step] /= avg_counts[step]
avg_pred_rates[step] = [x / avg_counts[step] for x in
avg_pred_rates[step]]
print(f'{avg_val_acc[step]:.4f}: {avg_pred_rates[step]}')
with open(f'{out_fname}_config.csv', 'w') as config_file:
conf = csv.writer(config_file)
conf.writerow(['type', 'optim', 'batch_size', 'target', 'bias',
'atk_threads'])
conf.writerow([atk_type, optim_type, batch_size, target_lbl, bias,
atk_batches])
with open(f'{out_fname}_eval.csv', 'w') as eval_file:
evw = csv.writer(eval_file)
evw.writerow(['step', 'avg_val_acc', 'count'])
for step in avg_counts:
evw.writerow([step, avg_val_acc[step], avg_counts[step]])
with open(f'{out_fname}_preds.csv', 'w') as preds_file:
pred = csv.writer(preds_file)
# (get step from eval file)
pred.writerow([f'{lbl}' for lbl in range(10)])
for step in avg_counts:
pred.writerow(avg_pred_rates[step])
```
#### File: jose-sv/hogwild_pytorch/main.py
```python
from __future__ import print_function
import logging
import argparse
import time
import os
import sys
from shutil import rmtree, copy
import tarfile
import errno
import csv
from tqdm import tqdm
import torch # pylint: disable=F0401
import torch.multiprocessing as mp # pylint: disable=F0401
from torchvision import datasets
from models.models.resnet import ResNet18
from train import train, test
# Training settings
parser = argparse.ArgumentParser(description='APA Demonstration')
parser.add_argument('runname', help='name for output files')
# TODO fix default paths
parser.add_argument('--tmp-dir', type=str, default='/tmp',
help="Directory to run out of. Prevents files from being"
"left all over the place, or in case you don't want to run"
"out of NFS")
parser.add_argument('--final-dir', type=str,
default='outputs',
help='Directory to place final outputs in')
# options for simulated attacks
sub_parsers = parser.add_subparsers(dest='mode', help='Sub-Command help')
mlti_sim_prs = sub_parsers.add_parser('simulate-multi',
help='Simulate Stale params APA (No OS)')
mlti_sim_prs.add_argument('--step-size', default=10, type=int, metavar='S',
help='Number of threads at each multi attack stage')
mlti_sim_prs.add_argument('--num-stages', default=10, type=int, metavar='NS',
help='Number of multi attack stages')
lr_sim_prs = sub_parsers.add_parser('simulate',
help='Simulate Stale LR APA (No OS)')
lr_sim_prs.add_argument('--attack-batches', default=1, type=int,
metavar='AB',
help='Number of biased updates to apply')
sub_parsers.add_parser('baseline',
help='Enables CUDA training. '
'Useful for training checkpoints. Do not use for the '
'attack, as training must be CPU based and '
'multithreaded.')
# checkpoint options
ckpt_group = parser.add_argument_group('Checkpoint Options')
# TODO include epoch in checkpoint
ckpt_group.add_argument('--resume', default=-1, type=int, metavar='RE',
help='Use checkpoint, from epoch [RE]')
ckpt_group.add_argument('--attack-checkpoint-path', type=str, default='train',
metavar='CN', help='Checkpoint load/save name')
ckpt_group.add_argument('--baseline-checkpoint-path', type=str, default=None,
metavar='CLN', help="If specified, load from this "
"checkpoint, but don't save to it")
ckpt_group.add_argument('--prepend-logs', type=str, default=None,
metavar='PRE', help='Logs to prepend checkpoint with. '
'Useful for plotting simulations with the baseline')
# TODO implement soft-resume
# ckpt_group.add_argument('--soft-resume', action='store_true', help='Use '
# 'checkpoint iff available')
# training options
train_group = parser.add_argument_group('Training Options')
train_group.add_argument('--max-steps', default=1, type=int, metavar='MS',
help='Number of non-attack epochs to train for. '
'DOES NOT AFFECT SIMULATED ATTACK THREADS.')
train_group.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='Initial learning rate (default: 0.1)')
train_group.add_argument('--num-processes', type=int, default=1, metavar='N',
help='how many training processes to use '
'(default: 2)')
train_group.add_argument('--batch-size', type=int, default=128, metavar='BS',
help='input batch size for training (default: 128)')
train_group.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
train_group.add_argument('--optimizer', type=str, default='sgd',
metavar='OPTIM', choices=['sgd', 'adam', 'rms'])
# attack options
atk_group = parser.add_argument_group('Attack Options; for OS managed and Sim')
atk_group.add_argument('--target', type=int, default=-1, metavar='T',
help='Target label for biased batch. -1 is target-any.')
atk_group.add_argument('--bias', type=float, default=0.2, metavar='B',
help='How biased a batch should be. To simulate an '
'indiscriminate attack, set this value to 0.10 (equal '
' distribution of all labels in each batch)')
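# Example invocation (illustrative values; options of the main parser must come
# before the sub-command, whose own options follow it):
#   python main.py testrun --resume 340 --bias 0.2 --target 3 simulate --attack-batches 2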
def procs_alive(procs):
"""Returns true as long as any worker is alive
Used as a non-blocking join. """
for cp in procs:
if cp.is_alive():
return True
logging.debug('No Process alive')
return False
def setup_outfiles(dirname, final_dir, prepend=None):
"""Call this function with the output directory for logs
If the output directory does not exist, it is created.
If the output directory exists, but has old logs, they are removed.
If using a checkpoint, allows for prepending the old logs to the new ones,
for convenience when graphing."""
if prepend is not None:
assert(prepend != dirname), 'Prepend and output cannot be the same!'
# Create directory and clear files if they exist
if os.path.exists(dirname):
try:
rmtree(dirname)
logging.warning('Removed old output directory (%s)', dirname)
except OSError:
logging.error(sys.exc_info()[0])
sys.exit(1)
os.mkdir(dirname)
if not os.path.exists(final_dir):
os.mkdir(final_dir)
if prepend is not None: # prepending from checkpoint
assert(os.path.exists(prepend)), 'Prepend directory not found'
logging.info('Prepending logs from %s', prepend)
# Make sure prepend path exists, then copy the logs over
log_files = ['eval', 'conf.0', 'conf.1', 'conf.2', 'conf.3', 'conf.4',
'conf.5', 'conf.6', 'conf.7', 'conf.8', 'conf.9']
for cf in log_files:
logging.debug('Current file is %s', cf)
pre_fpath = f'{prepend}/{cf}'
assert(os.path.isfile(pre_fpath)), f"Missing {pre_fpath}"
copy(pre_fpath, f"{dirname}/{cf}")
def setup_and_load():
'''Setup checkpoints directories, and load if necessary'''
mdl = ResNet18().to(device)
# gradients are allocated lazily, so they are not shared here
mdl.share_memory()
# Make sure the directory to save checkpoints already exists
ckpt_dir = f'{args.tmp_dir}'
try:
os.mkdir(ckpt_dir)
logging.info('Created checkpoint directory (%s)', ckpt_dir)
except OSError as e:
if e.errno == errno.EEXIST:
logging.warning('Checkpoint directory already exist (%s)',
ckpt_dir)
else:
raise
# set load checkpoint name - if lckpt is set, use that otherwise use
# the same as the save name
ckpt_fname = f"{ckpt_dir}/{args.attack_checkpoint_path}.ckpt"
bestAcc = None
# load checkpoint if resume epoch is specified
if args.mode == 'simulate' or args.mode == 'simulate-multi':
assert(args.resume != -1), 'Simulate should be used with a checkpoint'
ckpt_load_fname = ckpt_fname if args.baseline_checkpoint_path is None \
else args.baseline_checkpoint_path
assert(os.path.isfile(ckpt_load_fname)), f'{ckpt_load_fname} not found'
checkpoint = torch.load(ckpt_load_fname,
map_location=lambda storage, loc: storage)
mdl.load_state_dict(checkpoint['net'])
bestAcc = checkpoint['acc']
setup_outfiles(outdir, args.final_dir, prepend=args.prepend_logs)
logging.info('Resumed from %s at %.3f', ckpt_load_fname, bestAcc)
else:
# for a full run, nothing to prepend or resume
setup_outfiles(outdir, args.final_dir)
return mdl, bestAcc, ckpt_fname
def inf_iter(procs):
'''Generator for TQDM on list of processes'''
while True:
yield procs_alive(procs)
def launch_atk_proc():
'''When simulating, run the attack thread alone'''
rank = 0
# atk_p = mp.Process(target=train, args=(rank, args, model, device,
# dataloader_kwargs))
# atk_p.start()
log = []
# eval_counter = 0
train(rank, args, model, device, dataloader_kwargs)
# while procs_alive([atk_p]):
# time.sleep(10)
# with tqdm(inf_iter([atk_p]), position=0, desc=f'{args.runname}',
# total=float("inf"), unit='Validation') as tbar:
# # while atk_p.is_alive(): # evaluate and log!
# for p_status in tbar:
# if p_status is False:
# break
#
# # evaluate without logging; logging is done by the worker
# _, val_acc = test(args, model, device, dataloader_kwargs,
# etime=None)
#
# log.append({'vacc': val_acc,
# 'time': eval_counter})
# logging.info('Attack Accuracy is %s', val_acc)
# tbar.set_postfix(acc=val_acc)
# eval_counter += 1
# # update checkpoint
# torch.save({'net': model.state_dict(), 'acc': val_acc},
# ckpt_output_fname)
# evaluate post attack
# If simulated, eval counter is the number of attack batches
# if multi sim, eval counter is the number of stages
if args.mode == 'simulate': # Variant 1 Simulation
post_attack_step = args.attack_batches
else: # Variant 2 Simulation
post_attack_step = args.num_stages
with open(f"{outdir}/eval", 'w') as eval_f:
writer = csv.DictWriter(eval_f, fieldnames=['time', 'vacc'])
for dat in log:
writer.writerow(dat)
return post_attack_step
def launch_procs(eval_counter=0, s_rank=0):
'''Launch normal workers.
If no workers would be spawned, just return. This will happen if
simulating with a single worker --- no recovery time is allowed. '''
if s_rank == args.num_processes:
_, val_acc = test(args, model, device, dataloader_kwargs,
etime=eval_counter)
return val_acc
# Spawn the worker processes. Each runs an independent call of the train
# function
processes = []
for rank in range(s_rank, args.num_processes):
p = mp.Process(target=train, args=(rank, args, model, device,
dataloader_kwargs))
p.start()
processes.append(p)
logging.info('Started %s', p.pid)
log = []
# While any process is alive, continuously evaluate accuracy - the master
# thread is the evaluation thread
with tqdm(inf_iter(processes), position=0, desc='Testing',
total=float("inf"), unit='Validation') as tbar:
for p_status in tbar:
if p_status is False:
break
# log in test
_, val_acc = test(args, model, device, dataloader_kwargs,
etime=eval_counter)
log.append({'vacc': val_acc,
'time': eval_counter})
# tqdm.write(f'Accuracy is {vacc}')
logging.info('Accuracy is %s', val_acc)
eval_counter += 1
tbar.set_postfix(acc=val_acc)
# update checkpoint
torch.save({'net': model.state_dict(), 'acc': val_acc},
ckpt_output_fname)
time.sleep(60)
# open eval log as append in case we're simulating and the attack thread
# added some data
with open(f"{outdir}/eval", 'a') as eval_f:
writer = csv.DictWriter(eval_f, fieldnames=['time', 'vacc'])
for dat in log:
writer.writerow(dat)
# There should be no processes left alive by this point, but do this anyway
# to make sure no orphaned processes are left behind
for proc in processes:
os.system("kill -9 {}".format(proc.pid))
return val_acc
if __name__ == '__main__':
args = parser.parse_args()
FORMAT = '%(message)s [%(levelno)s-%(asctime)s %(module)s:%(funcName)s]'
logging.basicConfig(level=logging.INFO, format=FORMAT,
handlers=[logging.StreamHandler(sys.stdout)])
simulating = False
if args.mode == 'baseline':
logging.info('Running a baseline')
if args.max_steps == 1:
assert(input('Training for a single epoch, is this intentional? '
'Recommended option for SGD is 350 epochs '
'y/[n]') == 'y'), 'Set the --max-steps option.'
elif args.mode == 'simulate':
simulating = True
logging.info('Running an LR simulation')
elif args.mode == 'simulate-multi':
simulating = True
logging.info('Running a multi attack baseline')
else:
logging.info('Running normal training')
# if available, train baselines on the GPU
# TODO support multiple GPU
use_cuda = torch.cuda.is_available()
# pylint: disable=E1101
device = torch.device("cuda" if use_cuda else "cpu")
logging.info('Running on %s', device)
dataloader_kwargs = {'pin_memory': True} if use_cuda else {}
if not args.mode == 'baseline' and \
not simulating and \
args.num_processes < 2:
assert(input('Are you generating a baseline on the CPU? y/[n]') ==
'y'), 'Use at least two processes for the OS based attack.'
mp.set_start_method('spawn')
# Directory to save logs to
# if changed, make sure the name in test_epoch in train.py matches
outdir = f"{args.tmp_dir}/{args.runname}.hogwild"
logging.info('Output directory is %s', outdir)
# setup checkpoint directory and load from checkpoint as needed
model, best_acc, ckpt_output_fname = setup_and_load()
torch.set_num_threads(10) # number of MKL threads for evaluation
# download dataset if not found
logging.debug('Downloading')
datasets.CIFAR10(f'{args.tmp_dir}/data/', train=True, download=True)
# Determine initial checkpoint accuracy
# necessary to get initial confidences
logging.info('Testing')
val_loss, val_accuracy = test(args, model, device, dataloader_kwargs,
etime=-1)
logging.info('Eval acc: %.3f', val_accuracy)
torch.set_num_threads(3) # number of MKL threads for evaluation
start_time = time.time()
# when simulating, attack process is the first to run
if simulating:
if args.attack_checkpoint_path != 'train':
logging.warning('Checkpoint path ignored during simulation')
step = launch_atk_proc()
# attack finished, allow for recovery if more than one worker
if args.num_processes > 1:
launch_procs(step, s_rank=1)
else:
# create status file, in case full attack script is being used
# if this is a baseline, creates the file and updates it but has no
# effect
with open(f'{args.tmp_dir}/{args.runname}.status', 'w') as sfile:
sfile.write('Starting Training\n')
launch_procs()
logging.info('Training run time: %.2f', time.time() - start_time)
# only save checkpoints if not simulating
if not simulating:
torch.set_num_threads(10) # number of MKL threads for evaluation
_, vacc = test(args, model, device, dataloader_kwargs, etime=None)
torch.save({'net': model.state_dict(), 'acc': vacc}, ckpt_output_fname)
copy(ckpt_output_fname, outdir)
# Copy generated logs out of the local directory onto the shared NFS
final_dir = f'{args.final_dir}/{args.runname}.tar.gz'
if os.path.isfile(final_dir):
os.remove(final_dir)
logging.info('Removed old output tar')
# compress output files
with tarfile.open(f'{outdir}.tar.gz', "w:gz") as tar:
tar.add(outdir, arcname=os.path.basename(outdir))
copy(f'{outdir}.tar.gz', final_dir)
logging.info('Copied logs and checkpoint to %s', final_dir)
``` |
{
"source": "josetaas/vendcrawler",
"score": 3
} |
#### File: vendcrawler/tests/test_vendcrawler.py
```python
import unittest
from vendcrawler.scripts.vendcrawler import VendCrawler
class TestVendCrawlerMethods(unittest.TestCase):
def test_get_links(self):
links = VendCrawler('a', 'b', 'c').get_links(2)
self.assertEqual(links,
['https://sarahserver.net/?module=vendor&p=1',
'https://sarahserver.net/?module=vendor&p=2'])
def test_get_page_count(self):
with open('test_vendcrawler.html', 'r') as f:
data = f.read()
page_count = VendCrawler('a', 'b', 'c').get_page_count(str(data))
self.assertEqual(int(page_count), 84)
if __name__ == '__main__':
unittest.main()
```
#### File: vendcrawler/scripts/vendcrawler.py
```python
import code
import os
import os.path
import urllib.request
from json import dumps
from multiprocessing.dummy import Pool as ThreadPool
from re import search
from time import sleep, strftime
import vendcrawler
from vendcrawler.scripts.vendpageparser import VendPageParser
from vendcrawler.scripts.vendcrawlerdb import VendCrawlerDB
class VendCrawler(object):
def __init__(self, user, password, database):
self.vcdb = VendCrawlerDB(user, password, database)
def run(self, interval):
while (True):
print ('Crawling.')
link = 'https://sarahserver.net/?module=vendor'
req = urllib.request.Request(link, headers={'User-Agent':
'Mozilla/5.0'})
with urllib.request.urlopen(req) as response:
page_count = self.get_page_count(response.read().decode('utf-8'))
links = self.get_links(int(page_count))
pool = ThreadPool(4)
results = pool.map(self.parse_link, links)
pool.close()
pool.join()
print ('Saving results.')
self.save_sql(results)
sleep(float(interval))
def parse_link(self, link):
vendpageparser = VendPageParser()
req = urllib.request.Request(link, headers={'User-Agent':
'Mozilla/5.0'})
with urllib.request.urlopen(req) as response:
vendpageparser.feed(response.read().decode('utf-8'))
return vendpageparser.items
def get_links(self, page_count):
links = []
for x in range(1, page_count + 1):
links.append('https://sarahserver.net/?module=vendor&p=' + str(x))
return links
def get_page_count(self, html):
m = search('Found a total of (.*) record\(s\) across (.*) page', html)
return m.group(2)
def save(self, json):
vc_dir = os.path.join(os.path.expanduser('~'), '.vendcrawler')
if (not os.path.isdir(vc_dir)):
os.mkdir(vc_dir)
json_file = strftime('%Y-%m-%d_%H:%M:%S') + '.json'
json_file = os.path.join(vc_dir, json_file)
with open(json_file, 'w') as f:
f.write(json)
def save_sql(self, items_pages):
table = 'items'
columns = ['item_id', 'item_name', 'vendor_id', 'shop_name',
'amount', 'price', 'map', 'datetime']
values = []
for items in items_pages:
for item in items:
value = [int(item['id']),
item['name'],
int(item['vendor_id']),
item['shop'],
int(item['amount'].replace(',', '')),
int(item['price'].replace(',', '')),
item['map'],
item['datetime']]
values.append(value)
self.vcdb.insert(table, columns, values)
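# Example (illustrative credentials, assumed for demonstration only):
#   VendCrawler('db_user', 'db_password', 'vendcrawler_db').run(600)
# crawls the vendor pages every 600 seconds and stores the parsed items via VendCrawlerDB.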
```
#### File: vendcrawler/scripts/vendpageparser.py
```python
import time
from collections import OrderedDict
from html.parser import HTMLParser
from re import search
import vendcrawler
class VendPageParser(HTMLParser):
def __init__(self):
super(VendPageParser, self).__init__()
self.in_table = False
self.row = 0
self.col = 0
self.get_data = False
self.items = []
self.reset_current()
def handle_starttag(self, tag, attrs):
if (not self.in_table and tag == 'table'):
for attr in attrs:
if (attr[0] == 'class' and attr[1] == 'horizontal-table'):
self.in_table = True
elif (self.in_table):
if (tag == 'tr'):
self.row += 1
elif (tag == 'td'):
self.col += 1
self.get_data = True
elif (self.col == 2 and tag == 'a'):
for attr in attrs:
if (attr[0] == 'href'):
m = search('id=(.*)', attr[1])
self.current['id'] = m.groups(0)[0]
elif (self.col == 6 and tag == 'a'):
for attr in attrs:
if (attr[0] == 'href'):
m = search('id=(.*)', attr[1])
self.current['vendor_id'] = m.groups(0)[0]
def handle_endtag(self, tag):
if (self.in_table):
if (tag == 'table'):
self.in_table = False
self.get_data = False
elif (tag == 'tr'):
self.col = 0
if (self.row > 1):
self.items.append(self.current)
self.reset_current()
def handle_data(self, data):
if (self.get_data):
if (data.isspace()):
return
data = data.replace('\r', '').replace('\t', '').replace('\n', '')
if (self.col == 2):
self.current['name'] = data
elif (self.col == 3):
self.current['amount'] = data
elif (self.col == 4):
self.current['price'] = data
elif (self.col == 6):
self.current['shop'] = data
elif (self.col == 7):
self.current['map'] = data
def reset_current(self):
self.current = OrderedDict()
self.current['id'] = ''
self.current['name'] = ''
self.current['amount'] = ''
self.current['price'] = ''
self.current['vendor_id'] = ''
self.current['shop'] = ''
self.current['map'] = ''
self.current['datetime'] = time.strftime('%Y-%m-%d %H:%M:%S')
``` |
{
"source": "joseTamezPena/tadpole-algorithms",
"score": 2
} |
#### File: models/benchmark_FRESACAD_R/__init__.py
```python
import pandas as pd
import numpy as np
import os
import sys
from tadpole_algorithms.models.tadpole_model import TadpoleModel
import datetime as dt
from dateutil.relativedelta import relativedelta
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
logger = logging.getLogger(__name__)
# this import is used to run R code from Python
from rpy2 import robjects
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
# used to convert R data frames to pandas data frames
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
from pathlib import Path
class Benchmark_FRESACAD_R(TadpoleModel):
def extractTrainTestDataSets_R(self,
D1D2DataFileName,
D3DataFilneName):
logger.info("Extract Training and Testing sets")
extracTrainTest_RSCRIPT = ""
with open('R_scripts/ExtractTrainTest_tadpole_D1_D2.r', 'r') as file:
extracTrainTest_RSCRIPT = file.read()
sourcePreprocess = robjects.r(extracTrainTest_RSCRIPT)
extractTrainTest_RFUNC = robjects.globalenv['ExtractTrainTest_tadpole_D1_D2']
outR = extractTrainTest_RFUNC(D1D2DataFileName,D3DataFilneName)
D1Train = pd.read_csv("data/_tmp_D1TrainingSet.csv")
D2Test = pd.read_csv("data/_tmp_D2TesingSet.csv")
D3Train = pd.read_csv("data/_tmp_D3TrainingSet.csv")
D3Test = pd.read_csv("data/_tmp_D3TesingSet.csv")
del D1Train["Unnamed: 0"]
del D2Test["Unnamed: 0"]
del D3Train["Unnamed: 0"]
del D3Test["Unnamed: 0"]
## The following code failed to convert the pandas DataFrame to an R data.frame
# with localconverter(ro.default_converter + pandas2ri.converter):
# D1D2DataFileName_R = ro.conversion.py2rpy(D1D2DataFileName)
# with localconverter(ro.default_converter + pandas2ri.converter):
# D3DataFilneName_R = ro.conversion.py2rpy(D3DataFilneName)
# D1Train,D2Test,D3Train,D3Test = extractTrainTest_RFUNC(D1D2DataFileName_R,D3DataFilneName_R)
# with localconverter(ro.default_converter + pandas2ri.converter):
# D1Train_df = ro.conversion.rpy2py(D1Train)
# with localconverter(ro.default_converter + pandas2ri.converter):
# D2Test_df = ro.conversion.rpy2py(D2Test)
# with localconverter(ro.default_converter + pandas2ri.converter):
# D3Train_df = ro.conversion.rpy2py(D3Train)
# with localconverter(ro.default_converter + pandas2ri.converter):
# D3Test_df = ro.conversion.rpy2py(D3Test)
# return D1Train_df,D2Test_df,D3Train_df,D3Test_df
return D1Train,D2Test,D3Train,D3Test
def preproc_with_R(self,
TrainMatrix,
TestMatrix,
Dictionary,
MinVisit=36,
colImputeThreshold=0.25,
rowImputeThreshold=0.10,
includeID=True,
usePreProc=False):
# the usePreProc flag selects whether to reuse already preprocessed data instead of recomputing it
logger.info("Prepocess Data Frames")
if usePreProc == False:
dataTADPOLEPreprocesingPy_RSCRIPT = ""
with open('R_scripts/dataTADPOLEPreprocesingPy.r', 'r') as file:
dataTADPOLEPreprocesingPy_RSCRIPT = file.read()
# replace the values in the script with the actual attributes needed (similar to passing arguments to a function)
sourcePreprocess = robjects.r(dataTADPOLEPreprocesingPy_RSCRIPT)
preproc_tadpole_D1_D2_RFUNC = robjects.globalenv['dataTADPOLEPreprocesingPy']
# print(preproc_tadpole_D1_D2_RFUNC.r_repr())
TrainMatrix.to_csv("data/_tmp_TrainMatrix.csv")
TestMatrix.to_csv("data/_tmp_TestMatrix.csv")
Dictionary.to_csv("data/_tmp_Dictionary.csv")
outResult = preproc_tadpole_D1_D2_RFUNC("data/_tmp_TrainMatrix.csv",
"data/_tmp_TestMatrix.csv",
"data/_tmp_Dictionary.csv",
MinVisit,
colImputeThreshold,
rowImputeThreshold,
includeID)
AdjustedTrainFrame = pd.read_csv("data/_tmp_dataTadpole$AdjustedTrainFrame.csv")
testingFrame = pd.read_csv("data/_tmp_dataTadpole$testingFrame.csv")
Train_Imputed = pd.read_csv("data/_tmp_dataTadpole$Train_Imputed.csv")
Test_Imputed = pd.read_csv("data/_tmp_dataTadpole$Test_Imputed.csv")
del AdjustedTrainFrame["Unnamed: 0"]
del testingFrame["Unnamed: 0"]
del Test_Imputed["Unnamed: 0"]
del Train_Imputed["Unnamed: 0"]
return AdjustedTrainFrame,testingFrame,Train_Imputed,Test_Imputed
def Train_Congitive(self,AdjustedTrainFrame,
numberOfRandomSamples=25,
delta=True,
usePreProc=False):
logger.info("Train Cognitive Models")
CognitiveModelsName = "data/_CognitiveClassModels_25.RDATA.RDATA"
if usePreProc == False :
AdjustedTrainFileName = "data/_tmp_AdjustedTrainFrame.csv"
AdjustedTrainFrame.to_csv(AdjustedTrainFileName)
TrainCongitive_RFunction = ""
with open('R_scripts/TrainCognitiveModels.r', 'r') as file:
TrainCongitive_RFunction = file.read()
sourceTrain = robjects.r(TrainCongitive_RFunction)
ContivieTrain_RFUNC = robjects.globalenv['TrainCognitiveModels']
CognitiveModelsName = ContivieTrain_RFUNC(AdjustedTrainFileName,
numberOfRandomSamples,
delta=delta)
return CognitiveModelsName
def Train_Regression(self,AdjustedTrainFrame,
ImputedTrainFrame,
numberOfRandomSamples=50,
usePreProc=False):
logger.info("Train ADAS13 and Ventricles Models")
RegressionModelsName = "data/_RegressionModels_50_Nolog.RDATA"
if usePreProc == False :
AdjustedTrainFileName = "data/_tmp_AdjustedTrainFrame.csv"
AdjustedTrainFrame.to_csv(AdjustedTrainFileName)
ImputedTrainFileName = "data/_tmp_ImputedTrainFrame.csv"
ImputedTrainFrame.to_csv(ImputedTrainFileName)
TrainRegression_RFunction = ""
with open('R_scripts/TrainRegressionModels.r', 'r') as file:
TrainRegression_RFunction = file.read()
sourceTrain = robjects.r(TrainRegression_RFunction)
RegressionTrain_RFUNC = robjects.globalenv['TrainRegressionModels']
RegressionModelsName = RegressionTrain_RFUNC(AdjustedTrainFileName,
ImputedTrainFileName,
numberOfRandomSamples)
return RegressionModelsName
def Forecast_All(self,
CognitiveModelsFileName,
RegressionModelsFileName,
AdjustedTestingFrame,
ImputedTestingFrame,
submissionTemplateFileName,
usePreProc=False):
logger.info("Forecast Congitive Status, ADAS13 and Ventricles")
forecastFilename = "data/_ForecastFRESACAD.csv"
if usePreProc == False :
AdjustedTestFileName = "data/_tmp_AdjustedTestFrame.csv"
AdjustedTestingFrame.to_csv(AdjustedTestFileName)
ImputedTestFileName = "data/_tmp_ImputedTestFrame.csv"
ImputedTestingFrame.to_csv(ImputedTestFileName)
Forecast_RSCRIPT = ""
#Tadpole_D1_D2.to_csv("data/temp/train_df.csv")
with open('R_scripts/ForecastAll.r', 'r') as file:
Forecast_RSCRIPT = file.read()
Forecast_out = robjects.r(Forecast_RSCRIPT)
Forecast_RFUNC = robjects.globalenv['ForecastAll']
forecastFilename = Forecast_RFUNC(CognitiveModelsFileName,
RegressionModelsFileName,
AdjustedTestFileName,
ImputedTestFileName,
submissionTemplateFileName
)
forecastFilename = forecastFilename[0]
print(forecastFilename)
print(type(forecastFilename))
# forecastFilename = str(forecastFilename)
data_path = Path(forecastFilename)
Forecast = pd.read_csv(data_path)
return Forecast
def train(self, train_df):
logger.info("custumtrain")
def predict(self, test_df):
logger.info("Predicting")
#end R functions
```
#### File: models/emc1/evaluate.py
```python
def main(eval_df,flag_D3):
from tadpole_algorithms.evaluation import evaluate_forecast
import pandas as pd
import os
str_exp=os.path.dirname(os.path.realpath(__file__))
os.chdir(str_exp)
f = open("intermediatedata.path", "r")
IntermediateFolder = f.read()
f.close()
if flag_D3==0:
forecast_df=pd.read_excel(IntermediateFolder+'/TADPOLE_Submission_EMC1.xlsx',sheet_name='ID 1')
elif flag_D3==1:
forecast_df=pd.read_excel(IntermediateFolder+'/TADPOLE_Submission_EMC1.xlsx',sheet_name='ID 5')
dictionary = evaluate_forecast(eval_df, forecast_df)
return dictionary
``` |
{
"source": "jose-tapia/Hyper-heuristic-Knapsack",
"score": 3
} |
#### File: Hyper-heuristic-Knapsack/Utils/formatValidation.py
```python
from Utils.IO import loadInstance, obtainFilenames, tapia_path
def validateFormat(path):
# Verify the correctness of the format for each instance
n, W, weights, profits = loadInstance(path)
if n < 0:
print('Error: Size negative')
return False
if W < 0:
print('Error: Capacity negative')
return False
if len(weights) != n or len(profits) != n:
print('Error: Size does not match')
return False
for w, p in zip(weights, profits):
if w < 0 or p < 0:
print('Error: Negative weight or profit')
return False
if w > W:
print('Error: Weight out of limit.')
return False
return True
if __name__ == '__main__':
datasets = ['OrtizBayliss_Train', 'OrtizBayliss_Test', 'Pisinger']
for dataset in datasets:
filenames = obtainFilenames(tapia_path, dataset)
if all(map(validateFormat, filenames)) is False:
print(f'ERROR: A file in {dataset} does not have the correct format.\n')
```
#### File: Hyper-heuristic-Knapsack/Utils/knapsack.py
```python
from typing import List
class Item(object):
def __init__(self, id, w: int, p: int):
self.name = id
self.weight = w
self.profit = p
def getName(self):
return self.name
def getProfit(self):
return self.profit
def getWeight(self):
return self.weight
def getRatio(self):
return self.profit/self.weight
def __str__(self):
return f' Item: {self.name}, <Weight: {str(self.weight)}, Profit: {str(self.profit)}>'
class Knapsack(object):
def __init__(self, W: int):
self.capacity = W
self.value = 0
self.items = []
def getCapacity(self):
return self.capacity
def getValue(self):
return self.value
def getPackedItems(self):
return self.items
def canPack(self, item: Item):
return item.getWeight() <= self.capacity
def pack(self, item: Item):
# Insert the item only if it has the capacity
if item.getWeight() <= self.capacity:
self.capacity -= item.getWeight()
self.value += item.getProfit()
self.items.append(item)
def unpack(self, idx: int):
# Unpack the item
if 0 <= idx and idx < len(self.items):
self.capacity += self.items[idx].getWeight()
self.value -= self.items[idx].getProfit()
return self.items.pop(idx)
else:
return None
def printKnapsack(self):
print(self)
for item in self.items:
print('\t', item)
def copy(self):
# Deep copy of the class
kp_copy = Knapsack(self.capacity)
kp_copy.value = self.value
kp_copy.items = self.items.copy()
return kp_copy
def __str__(self):
return f' Knapsack: <Capacity: {str(self.capacity)}, Value: {str(self.value)}, items: {str(len(self.items))}>'
def generateItemList(weights: List[int], profits: List[int]):
# Convert the list of weights and profits to a list of items
return [Item(id, w, p) for id, (w, p) in enumerate(zip(weights, profits))]
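# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Shows how Item, Knapsack and generateItemList are meant to interact; the
# weights, profits and capacity below are made-up example values.
if __name__ == '__main__':
    items = generateItemList([2, 3, 5], [10, 15, 7])
    kp = Knapsack(6)
    # Greedy fill by profit/weight ratio, packing only items that still fit.
    for item in sorted(items, key=lambda it: it.getRatio(), reverse=True):
        if kp.canPack(item):
            kp.pack(item)
    kp.printKnapsack()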
``` |
{
"source": "josetascon/dose-accumulation",
"score": 3
} |
#### File: josetascon/dose-accumulation/transform_referenced_ants_affine.py
```python
import os # os library, used to read files
import argparse # argument parser
from folder_utils import *
def main():
# Arguments details
parser = argparse.ArgumentParser(description='Transform all dose images in a folder to a single reference image. \
Registration with ANTS library')
parser.add_argument("reference", type=str,
help='Reference image file')
parser.add_argument("dose_folder", type=str,
help='Dose folder with images')
parser.add_argument("transform_folder", type=str,
help='Folder with computed transformations')
parser.add_argument("output_folder", type=str,
help='Output folder with images')
parser.add_argument('-d','--debug', action='store_true',
help='Enable debug mode')
parser.add_argument('-v','--verbose', action='store_true',
help='Enable verbose mode')
# Parse arguments
args = parser.parse_args()
dose_folder = args.dose_folder
transform_folder = args.transform_folder
output_folder = args.output_folder
reference = args.reference
# Files organized alphabetically
dose_files = os.listdir(dose_folder)
dose_files.sort()
# Transforms organized
transform_files = os.listdir(transform_folder)
transform_files.sort()
affine_prefix = ['_0Generic']
dfield_prefix = ['_1Warp.nii.gz']
affine_files = filter_folders_prefix(affine_prefix, transform_files)
# dfield_files = filter_folders_prefix(dfield_prefix, transform_files)
# Remove reference dose if inside dose files
numref = os.path.splitext(os.path.basename(reference))[0][-2:] # reference number
    # Drop every dose file whose name contains the reference number
    # (filtering by value avoids stale indices once elements are removed).
    dose_files = [f for f in dose_files if numref not in f]
    print('\nRunning script to transform 3D images based on a reference.')
if args.debug:
print('[Debug Mode]')
if args.verbose:
print('\nReference file: \n' + str(fullpath_to_localpath([reference])) )
print('\nFiles found: \n' + str(dose_files))
print('\nAffine transforms found: \n' + str(affine_files))
# print('\nField transforms found: \n' + str(dfield_files))
# assert(len(dfield_files) == len(affine_files) and len(dfield_files) == len(dose_files))
# Create directory to save output
output_path = os.path.join(output_folder,'affine_doses_ants/') # output path
print('\nCreate output folder:')
os.system('echo mkdir -p ' + output_path ) # echo mkdir
if not args.debug:
os.system('mkdir -p ' + output_path ) # make directory
print('\nTransformations:')
# Script loop
c = 0
k = 0
while c < len(dose_files):
# for k in range(len(dose_files)):
num_img = 'dose' + str(k+1).zfill(2) # the files contain the word dose
doses_to_process = [[i for i, s in enumerate(dose_files) if num_img in s]]
# print(num_img)
# print(doses_to_process)
for d,j in enumerate (doses_to_process[0]):
# Moving image path
moving_image = os.path.join(dose_folder, dose_files[j])
# Output file names
out_prefix = '{}_to_{}.nrrd'.format(os.path.splitext(os.path.basename(dose_files[j]))[0],
os.path.splitext(os.path.basename(reference))[0][-10:])
output = os.path.join(output_path, out_prefix)
affine_transform = os.path.join(transform_folder, affine_files[k])
# dfield_transform = os.path.join(transform_folder, dfield_files[k])
cmd = 'antsApplyTransforms -d 3 \
-i {} -o {} -r {} -t {}'.format( moving_image, output , reference , affine_transform )
# Execute the command
os.system('echo ' + cmd)
if not args.debug:
os.system(cmd)
c += len(doses_to_process[0])
k += 1
print(k,c)
return
if __name__ == "__main__":
# execute only if run as a script
main()
``` |
{
"source": "josete89/serverless-transit-network-orchestrator",
"score": 2
} |
#### File: aws/services/transit_gateway_peering_attachments.py
```python
from botocore.exceptions import ClientError
from lib.decorator import try_except_retry
from aws.utils.boto3_session import Boto3Session
class TgwPeeringAttachmentAPIHandler(Boto3Session):
def __init__(self, logger, region, **kwargs):
self.logger = logger
self.__service_name = 'ec2'
self.region = region
kwargs.update({'region': self.region})
super().__init__(self.logger, self.__service_name, **kwargs)
self.ec2_client = super().get_client()
@try_except_retry()
def describe_transit_gateway_peering_attachments(self,
tgw_id: str,
states: list) -> list:
"""
Describe the tgw peering attachments for the tagged tgw id
:param tgw_id: tgw id of the tagged transit gateway
:param states: use the state to limit the returned response
:return: list of transit gateway peering attachments
"""
try:
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
},
{
'Name': 'state',
'Values': states
}
]
)
transit_gateway_peering_attachments_list = response.get(
'TransitGatewayPeeringAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info("Handling Next Token: {}".format(next_token))
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
},
{
'Name': 'state',
'Values': states
}
],
NextToken=next_token)
self.logger.info("Extending TGW Peering Attachment List")
transit_gateway_peering_attachments_list \
.extend(response.get('TransitGatewayPeeringAttachments',
[]))
next_token = response.get('NextToken', None)
return transit_gateway_peering_attachments_list
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def create_transit_gateway_peering_attachment(self,
tgw_id: str,
peer_tgw_id: str,
peer_account_id,
peer_region) -> dict:
"""
Create tgw peering attachment
:param tgw_id: REQUIRED - transit gateway id of the local region
:param peer_tgw_id: REQUIRED - id for peer transit gateway hosted in
the peer region
:param peer_account_id: REQUIRED - current account id
:param peer_region: peer region where peer transit gateway is hosted
:return: details for the tgw peering attachment
"""
try:
response = self.ec2_client\
.create_transit_gateway_peering_attachment(
TransitGatewayId=tgw_id,
PeerTransitGatewayId=peer_tgw_id,
PeerAccountId=peer_account_id,
PeerRegion=peer_region,
)
return response.get('TransitGatewayPeeringAttachment')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def delete_transit_gateway_peering_attachment(self,
tgw_attach_id: str) -> str:
"""
Delete tgw peering attachment
:param tgw_attach_id: REQUIRED - transit gateway peering attachment id
:return: current state of the peering attachment
"""
try:
response = self.ec2_client\
.delete_transit_gateway_peering_attachment(
TransitGatewayAttachmentId=tgw_attach_id
)
return response.get('TransitGatewayPeeringAttachment').get('State')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def accept_transit_gateway_peering_attachment(self,
tgw_attach_id: str) -> str:
"""
Accept tgw peering attachment
:param tgw_attach_id: REQUIRED - transit gateway peering attachment id
:return: current state of the peering attachment
"""
try:
response = self.ec2_client\
.accept_transit_gateway_peering_attachment(
TransitGatewayAttachmentId=tgw_attach_id
)
return response.get('TransitGatewayPeeringAttachment').get('State')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def get_transit_gateway_peering_attachment_state(self,
tgw_attachment_id) -> list:
"""
Describe the tgw peering attachments for the tagged tgw id
:param tgw_attachment_id: tgw id of the tagged transit gateway
:return: list of transit gateway peering attachments
"""
try:
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
TransitGatewayAttachmentIds=[tgw_attachment_id])
transit_gateway_peering_attachments_list = response.get(
'TransitGatewayPeeringAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info(
"Handling Next Token: {}".format(next_token))
response = self.ec2_client \
.describe_transit_gateway_peering_attachments(
TransitGatewayAttachmentIds=[tgw_attachment_id],
NextToken=next_token)
self.logger.info("Extending TGW Peering Attachment List")
transit_gateway_peering_attachments_list \
.extend(response.get('TransitGatewayPeeringAttachments',
[]))
next_token = response.get('NextToken', None)
state = transit_gateway_peering_attachments_list[0].get('State')
return state
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
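# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Typical call pattern for the handler above; the region and tgw id are
# placeholder values, and Logger comes from the solution's lib.logger module.
#
#   from lib.logger import Logger
#   handler = TgwPeeringAttachmentAPIHandler(Logger('info'), 'us-east-1')
#   attachments = handler.describe_transit_gateway_peering_attachments(
#       'tgw-0123456789abcdef0', ['available', 'pendingAcceptance'])
#   for attachment in attachments:
#       print(attachment.get('TransitGatewayAttachmentId'),
#             attachment.get('State'))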
```
#### File: serverless-transit-network-orchestrator/source/lambda_custom_resource.py
```python
from hashlib import md5
from lib.crhelper import cfn_handler
from custom_resource_handler import StepFunctions, CWEventPermissions, \
S3ConsoleDeploy, CFNMetrics, PrefixListIdToArnConverter
from lib.logger import Logger
import os
import inspect
# initialise logger
LOG_LEVEL = os.environ.get('LOG_LEVEL')
if LOG_LEVEL is None:
LOG_LEVEL = 'info'
logger = Logger(loglevel=LOG_LEVEL)
init_failed = False
def create(event, context):
"""
Runs on Stack Creation.
As there is no real 'resource', and it will never be replaced,
PhysicalResourceId is set to a hash of StackId and LogicalId.
"""
s = '%s-%s' % (event.get('StackId'), event.get('LogicalResourceId'))
physical_resource_id = md5(s.encode('UTF-8')).hexdigest()
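    # For example (hypothetical values, editor's addition): with
    # StackId='arn:aws:cloudformation:us-east-1:111111111111:stack/demo/abc' and
    # LogicalResourceId='CWEventPermissions', s becomes
    # 'arn:aws:cloudformation:...:stack/demo/abc-CWEventPermissions' and the
    # physical id is its stable 32-character md5 hex digest of that string.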
if event.get('ResourceType') == 'Custom::CWEventPermissions':
cwe = CWEventPermissions(event, logger)
logger.info("Create CW Event Bus Policy - CR Router")
response = cwe.create_permissions()
logger.info("Response from Create Policy CR Handler")
logger.info(response)
return physical_resource_id, response
elif event.get('ResourceType') == 'Custom::ConsoleDeploy':
cd = S3ConsoleDeploy(event, logger)
logger.info("Deploy console content to s3")
cd.upload_console_files()
cd.upload_config_file()
response = None
return physical_resource_id, response
elif event.get('ResourceType') == 'Custom::GetPrefixListArns':
converter_client = PrefixListIdToArnConverter(event, logger)
response = converter_client.get_prefix_list_arns()
logger.info("Response from Get Prefix List Arns - CR Handler")
logger.info(response)
return physical_resource_id, response
elif event.get('ResourceType') == 'Custom::SendCFNParameters':
send = CFNMetrics(event, logger)
send.send_metrics()
response = None
return physical_resource_id, response
else:
logger.error('No valid ResourceType found! Resource type \"'+event['ResourceType']+'\" received', exc_info=True)
raise Exception('No valid ResourceType found! Resource type \"'+event['ResourceType']+'\" received')
def update(event, context):
"""
Runs on Stack Update
"""
physical_resource_id = event['PhysicalResourceId']
if event.get('ResourceType') == 'Custom::CWEventPermissions':
cwe = CWEventPermissions(event, logger)
logger.info("Updating CW Event Bus Policy - CR Router")
response = cwe.update_permissions()
logger.info("Response from Update Policy CR Handler")
logger.info(response)
return physical_resource_id, response
elif event.get('ResourceType') == 'Custom::ConsoleDeploy':
cd = S3ConsoleDeploy(event, logger)
logger.info("Update and deploy customer console config file to s3")
cd.upload_console_files()
cd.upload_config_file()
response = None
return physical_resource_id, response
elif event.get('ResourceType') == 'Custom::GetPrefixListArns':
converter_client = PrefixListIdToArnConverter(event, logger)
response = converter_client.get_prefix_list_arns()
logger.info("Response from Get Prefix List Arns - CR Handler")
logger.info(response)
return physical_resource_id, response
elif event.get('ResourceType') == 'Custom::SendCFNParameters':
send = CFNMetrics(event, logger)
send.send_metrics()
response = None
return physical_resource_id, response
else:
logger.error('No valid ResourceType found! Resource type \"'+event['ResourceType']+'\" received', exc_info=True)
raise Exception('No valid ResourceType found! Resource type \"'+event['ResourceType']+'\" received')
def delete(event, context):
"""
Runs on Stack Delete.
"""
if event.get('ResourceType') == 'Custom::CWEventPermissions':
cwe = CWEventPermissions(event, logger)
logger.info("Deleting CW Event Bus Policy - CR Router")
response = cwe.delete_permissions()
logger.info("Response from Delete Policy CR Handler")
logger.info(response)
return response
elif event.get('ResourceType') == 'Custom::ConsoleDeploy':
logger.info("No action required, returning 'None'")
response = None
return response
elif event.get('ResourceType') == 'Custom::GetPrefixListArns':
logger.info("No action required, returning 'None'")
response = None
return response
elif event.get('ResourceType') == 'Custom::SendCFNParameters':
logger.info("No action required, returning 'None'")
response = None
return response
else:
logger.error('No valid ResourceType found! Resource type \"'+event['ResourceType']+'\" received', exc_info=True)
raise Exception('No valid ResourceType found! Resource type \"'+event['ResourceType']+'\" received')
def lambda_handler(event, context):
# Lambda handler function uses cr helper library to handle CloudFormation services
try:
logger.info("<<<<<<<<<< Custom Resource lambda_handler Event >>>>>>>>>>")
# if the event is from the CloudWatch Events Service then invoke the state machine
if event.get('source') == 'aws.tag' and event.get('detail-type') == 'Tag Change on Resource':
logger.info('Event received from CloudWatch Event Service')
logger.info(event)
state_machine = StepFunctions(event, logger)
state_machine.trigger_state_machine()
# else if the event is from Cloudformation Service
elif event.get('StackId') is not None and 'arn:aws:cloudformation' in event.get('StackId'):
logger.info('Event received from Cloudformation Service')
logger.info(event)
return cfn_handler(event, context, create, update, delete, logger, init_failed)
# else of the event is from Web Application
elif event.get('data') is not None:
logger.info('Event received from Web App - Transit Network Management Console')
logger.info(event)
state_machine = StepFunctions(event.get('data'), logger)
state_machine.trigger_state_machine()
else:
logger.info(event)
logger.error('The event is from an invalid source')
raise Exception('The event is neither from CloudWatch Event service or from Cloudformation service.')
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
logger.exception(message)
raise
```
#### File: source/lib/ec2.py
```python
from botocore.exceptions import ClientError
from lib.decorator import try_except_retry
import boto3
import inspect
class EC2(object):
def __init__(self, logger, region, **kwargs):
self.logger = logger
if kwargs is not None:
if kwargs.get('credentials') is None:
logger.debug("Setting up EC2 BOTO3 Client with default credentials")
self.ec2_client = boto3.client('ec2', region_name=region)
else:
logger.debug("Setting up EC2 BOTO3 Client with ASSUMED ROLE credentials")
cred = kwargs.get('credentials')
self.ec2_client = boto3.client('ec2', region_name=region,
aws_access_key_id=cred.get('AccessKeyId'),
aws_secret_access_key=cred.get('SecretAccessKey'),
aws_session_token=cred.get('SessionToken')
)
else:
logger.info("There were no keyworded variables passed.")
self.ec2_client = boto3.client('ec2', region_name=region)
@try_except_retry()
def describe_regions(self):
try:
response = self.ec2_client.describe_regions()
return response.get('Regions')
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_vpcs(self, vpc_id):
try:
response = self.ec2_client.describe_vpcs(
VpcIds=[vpc_id]
)
vpc_list = response.get('Vpcs', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.describe_vpcs(
VpcIds=[vpc_id],
NextToken=next_token
)
vpc_list.extend(response.get('Vpcs', []))
next_token = response.get('NextToken', None)
return vpc_list # return list - should contain only one item in the list
except ClientError as e:
if e.response['Error']['Code'] == 'OptInRequired':
self.logger.info("Caught exception 'OptInRequired', handling the exception...")
return {"Error": "OptInRequired"}
else:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_subnets(self, subnet_id):
try:
response = self.ec2_client.describe_subnets(
SubnetIds=[subnet_id]
)
subnet_list = response.get('Subnets', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.describe_subnets(
SubnetIds=[subnet_id],
NextToken=next_token
)
subnet_list.extend(response.get('Subnets', []))
next_token = response.get('NextToken', None)
return subnet_list # return list - should contain only one item in the list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_internet_gateways(self, vpc_id):
try:
response = self.ec2_client.describe_internet_gateways(
Filters=[
{
'Name': 'attachment.vpc-id',
'Values': [
vpc_id,
],
},
]
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_availability_zones(self):
try:
response = self.ec2_client.describe_availability_zones(Filters=[{'Name': 'state', 'Values': ['available']}])
return [r['ZoneName'] for r in response['AvailabilityZones']]
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def create_route_cidr_block(self,
vpc_cidr,
route_table_id,
transit_gateway_id):
try:
response = self.ec2_client.create_route(
DestinationCidrBlock=vpc_cidr,
RouteTableId=route_table_id,
TransitGatewayId=transit_gateway_id
)
return response
except ClientError as e:
self.logger.log_unhandled_exception(e)
raise
def delete_route_cidr_block(self, vpc_cidr, route_table_id):
try:
response = self.ec2_client.delete_route(
DestinationCidrBlock=vpc_cidr,
RouteTableId=route_table_id
)
return response
except ClientError as e:
self.logger.log_unhandled_exception(e)
raise
def create_route_prefix_list(self,
prefix_list,
route_table_id,
transit_gateway_id):
try:
response = self.ec2_client.create_route(
DestinationPrefixListId=prefix_list,
RouteTableId=route_table_id,
TransitGatewayId=transit_gateway_id
)
return response
except ClientError as e:
self.logger.log_unhandled_exception(e)
raise
def delete_route_prefix_list(self, prefix_list, route_table_id):
try:
response = self.ec2_client.delete_route(
DestinationPrefixListId=prefix_list,
RouteTableId=route_table_id
)
return response
except ClientError as e:
self.logger.log_unhandled_exception(e)
raise
@try_except_retry()
def describe_route_tables_for_subnet(self, subnet_id):
try:
response = self.ec2_client.describe_route_tables(
Filters=[
{
'Name': 'association.subnet-id',
'Values': [subnet_id]
}
]
)
route_table_list = response.get('RouteTables', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.describe_route_tables(
Filters=[
{
'Name': 'association.subnet-id',
'Values': [subnet_id]
}
],
NextToken=next_token
)
route_table_list.extend(response.get('RouteTables', []))
next_token = response.get('NextToken', None)
return route_table_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def associate_transit_gateway_route_table(self, transit_gateway_route_table_id, transit_gateway_attachment_id):
try:
response = self.ec2_client.associate_transit_gateway_route_table(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
TransitGatewayAttachmentId=transit_gateway_attachment_id
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def create_transit_gateway_vpc_attachment(self, tgw_id, vpc_id, subnet_id, attachment_name):
"""
:param tgw_id:
:param vpc_id:
:param subnet_id:
:return:
{
'TransitGatewayVpcAttachment': {
'TransitGatewayAttachmentId': 'string',
'TransitGatewayId': 'string',
'VpcId': 'string',
'VpcOwnerId': 'string',
'State': 'pendingAcceptance'|'rollingBack'|'pending'|'available'|'modifying'|'deleting'|'deleted'
|'failed'|'rejected'|'rejecting'|'failing',
'SubnetIds': [
'string',
],
'CreationTime': datetime(2015, 1, 1),
'Options': {
'DnsSupport': 'enable'|'disable',
'Ipv6Support': 'enable'|'disable'
},
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
"""
try:
response = self.ec2_client.create_transit_gateway_vpc_attachment(
TransitGatewayId=tgw_id,
VpcId=vpc_id,
SubnetIds=[
subnet_id
],
TagSpecifications=[{ 'ResourceType': 'transit-gateway-attachment',
'Tags':[{ 'Key': 'Name', 'Value': attachment_name }] }]
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def delete_transit_gateway_vpc_attachment(self, tgw_attachment_id):
"""
:param tgw_attachment_id:
:return:
{
'TransitGatewayVpcAttachment': {
'TransitGatewayAttachmentId': 'string',
'TransitGatewayId': 'string',
'VpcId': 'string',
'VpcOwnerId': 'string',
'State': 'pendingAcceptance'|'rollingBack'|'pending'|'available'|'modifying'|'deleting'|'deleted'
|'failed'|'rejected'|'rejecting'|'failing',
'SubnetIds': [
'string',
],
'CreationTime': datetime(2015, 1, 1),
'Options': {
'DnsSupport': 'enable'|'disable',
'Ipv6Support': 'enable'|'disable'
},
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
"""
try:
response = self.ec2_client.delete_transit_gateway_vpc_attachment(
TransitGatewayAttachmentId=tgw_attachment_id
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def get_transit_gateway_vpc_attachment_state(self, tgw_attachment_id):
try:
response = self.ec2_client.describe_transit_gateway_vpc_attachments(
TransitGatewayAttachmentIds=[
tgw_attachment_id
]
)
transit_gateway_vpc_attachments_list = response.get('TransitGatewayVpcAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info("Next Token Returned: {}".format(next_token))
response = self.ec2_client.describe_transit_gateway_vpc_attachments(
TransitGatewayAttachmentIds=[
tgw_attachment_id
],
NextToken=next_token
)
self.logger.info("Extending TGW-VPC Attachment List")
transit_gateway_vpc_attachments_list.extend(response.get('TransitGatewayVpcAttachments', []))
next_token = response.get('NextToken', None)
return transit_gateway_vpc_attachments_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_transit_gateway_vpc_attachments(self, tgw_id, vpc_id, state):
try:
response = self.ec2_client.describe_transit_gateway_vpc_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
},
{
'Name': 'vpc-id',
'Values': [vpc_id]
},
{
'Name': 'state',
'Values': state
}
]
)
transit_gateway_vpc_attachments_list = response.get('TransitGatewayVpcAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info("Next Token Returned: {}".format(next_token))
response = self.ec2_client.describe_transit_gateway_vpc_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
},
{
'Name': 'vpc-id',
'Values': [vpc_id]
},
{
'Name': 'state',
'Values': state
}
],
NextToken=next_token
)
self.logger.info("Extending TGW-VPC Attachment List")
transit_gateway_vpc_attachments_list.extend(response.get('TransitGatewayVpcAttachments', []))
next_token = response.get('NextToken', None)
return transit_gateway_vpc_attachments_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_transit_gateway_attachments(self, transit_gateway_attachment_id): #, tgw_id, vpc_id):
try:
response = self.ec2_client.describe_transit_gateway_attachments(
TransitGatewayAttachmentIds=[transit_gateway_attachment_id]
)
transit_gateway_attachments_list = response.get('TransitGatewayAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info("Next Token Returned: {}".format(next_token))
response = self.ec2_client.describe_transit_gateway_attachments(
TransitGatewayAttachmentIds=[transit_gateway_attachment_id],
NextToken=next_token
)
self.logger.info("Extending TGW Attachment List")
transit_gateway_attachments_list.extend(response.get('TransitGatewayAttachments', []))
next_token = response.get('NextToken', None)
return transit_gateway_attachments_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def describe_transit_gateway_route_tables(self, tgw_id):
try:
response = self.ec2_client.describe_transit_gateway_route_tables(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
}
]
)
route_table_list = response.get('TransitGatewayRouteTables', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.describe_transit_gateway_route_tables(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
}
],
NextToken=next_token
)
route_table_list.extend(response.get('TransitGatewayRouteTables', []))
next_token = response.get('NextToken', None)
return route_table_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def disable_transit_gateway_route_table_propagation(self, transit_gateway_route_table_id, transit_gateway_attachment_id):
try:
response = self.ec2_client.disable_transit_gateway_route_table_propagation(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
TransitGatewayAttachmentId=transit_gateway_attachment_id
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def disassociate_transit_gateway_route_table(self, transit_gateway_route_table_id, transit_gateway_attachment_id):
try:
response = self.ec2_client.disassociate_transit_gateway_route_table(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
TransitGatewayAttachmentId=transit_gateway_attachment_id
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def enable_transit_gateway_route_table_propagation(self, transit_gateway_route_table_id, transit_gateway_attachment_id):
try:
response = self.ec2_client.enable_transit_gateway_route_table_propagation(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
TransitGatewayAttachmentId=transit_gateway_attachment_id
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def get_transit_gateway_attachment_propagations(self, transit_gateway_attachment_id):
try:
response = self.ec2_client.get_transit_gateway_attachment_propagations(
TransitGatewayAttachmentId=transit_gateway_attachment_id
)
propagations_list = response.get('TransitGatewayAttachmentPropagations', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.get_transit_gateway_attachment_propagations(
TransitGatewayAttachmentId=transit_gateway_attachment_id,
NextToken=next_token
)
propagations_list.extend(response.get('TransitGatewayAttachmentPropagations', []))
next_token = response.get('NextToken', None)
return propagations_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def get_transit_gateway_route_table_associations(self, transit_gateway_route_table_id,
transit_gateway_attachment_id,
resource_id,
resource_type='vpc'):
try:
response = self.ec2_client.get_transit_gateway_route_table_associations(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
Filters=[
{
'Name': 'transit-gateway-attachment-id',
'Values': [transit_gateway_attachment_id]
},
{
'Name': 'resource-type',
'Values': [resource_type]
},
{
'Name': 'resource-id',
'Values': [resource_id]
}
]
)
associations_list = response.get('Associations', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.get_transit_gateway_route_table_associations(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
NextToken=next_token
)
associations_list.extend(response.get('Associations', []))
next_token = response.get('NextToken', None)
return associations_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
@try_except_retry()
def get_transit_gateway_route_table_propagations(self, transit_gateway_route_table_id):
try:
response = self.ec2_client.get_transit_gateway_route_table_propagations(
TransitGatewayRouteTableId=transit_gateway_route_table_id
)
propagations_list = response.get('TransitGatewayRouteTablePropagations', [])
next_token = response.get('NextToken', None)
while next_token is not None:
response = self.ec2_client.get_transit_gateway_attachment_propagations(
TransitGatewayRouteTableId=transit_gateway_route_table_id,
NextToken=next_token
)
propagations_list.extend(response.get('TransitGatewayRouteTablePropagations', []))
next_token = response.get('NextToken', None)
return propagations_list
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def add_subnet_to_tgw_attachment(self, tgw_attachment_id, subnet_id):
try:
response = self.ec2_client.modify_transit_gateway_vpc_attachment(
TransitGatewayAttachmentId=tgw_attachment_id,
AddSubnetIds=[
subnet_id
]
)
return response
except ClientError as e:
if e.response['Error']['Code'] == 'IncorrectState':
self.logger.info("Caught exception 'IncorrectState', handling the exception...")
return {"Error": "IncorrectState"}
if e.response['Error']['Code'] == 'DuplicateSubnetsInSameZone':
self.logger.info("Caught exception 'DuplicateSubnetsInSameZone', handling the exception...")
return {"Error": "DuplicateSubnetsInSameZone",
"Message": str(e)}
else:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def remove_subnet_from_tgw_attachment(self, tgw_attachment_id, subnet_id):
try:
response = self.ec2_client.modify_transit_gateway_vpc_attachment(
TransitGatewayAttachmentId=tgw_attachment_id,
RemoveSubnetIds=[
subnet_id
]
)
return response
except ClientError as e:
if e.response['Error']['Code'] == 'IncorrectState':
self.logger.info("Caught exception 'IncorrectState', handling the exception...")
return {"Error": "IncorrectState"}
elif e.response['Error']['Code'] == 'InsufficientSubnetsException':
self.logger.info("Caught exception 'InsufficientSubnetsException', handling the exception...")
return {"Error": "InsufficientSubnetsException"}
else:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def create_tags(self, resource_id, key, value):
try:
response = self.ec2_client.create_tags(
Resources=[
resource_id
],
Tags=[
{
'Key': key,
'Value': value
},
]
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
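# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Illustrates the wrapper's two construction modes and a couple of calls; the
# region, VPC, subnet and TGW identifiers are placeholder values.
#
#   from lib.logger import Logger
#   ec2 = EC2(Logger('info'), 'us-east-1')                 # default credentials
#   ec2_assumed = EC2(Logger('info'), 'us-east-1',
#                     credentials={'AccessKeyId': '...',
#                                  'SecretAccessKey': '...',
#                                  'SessionToken': '...'})  # assumed-role credentials
#   zones = ec2.describe_availability_zones()
#   response = ec2.create_transit_gateway_vpc_attachment(
#       'tgw-0123456789abcdef0', 'vpc-0123456789abcdef0',
#       'subnet-0123456789abcdef0', 'my-attachment')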
```
#### File: source/lib/s3.py
```python
import boto3
import inspect
class S3(object):
def __init__(self, logger, **kwargs):
self.logger = logger
if kwargs is not None:
if kwargs.get('credentials') is None:
logger.debug("Setting up S3 BOTO3 Client with default credentials")
self.s3_client = boto3.client('s3')
else:
logger.debug("Setting up S3 BOTO3 Client with ASSUMED ROLE credentials")
cred = kwargs.get('credentials')
self.s3_client = boto3.client('s3',
aws_access_key_id=cred.get('AccessKeyId'),
aws_secret_access_key=cred.get('SecretAccessKey'),
aws_session_token=cred.get('SessionToken')
)
else:
logger.info("There were no keyworded variables passed.")
self.s3_client = boto3.client('s3')
def get_bucket_policy(self, bucket_name):
try:
response = self.s3_client.get_bucket_policy(
Bucket=bucket_name
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def put_bucket_policy(self, bucket_name, bucket_policy):
try:
response = self.s3_client.put_bucket_policy(
Bucket=bucket_name,
Policy=bucket_policy
)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def upload_file(self, bucket_name, local_file_location, remote_file_location):
try:
s3 = boto3.resource('s3')
s3.Bucket(bucket_name).upload_file(local_file_location, remote_file_location)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def download_file(self, bucket_name, remote_file_location, local_file_location):
try:
s3 = boto3.resource('s3')
s3.Bucket(bucket_name).download_file(remote_file_location, local_file_location)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def put_bucket_encryption(self, bucket_name, key_id):
try:
self.s3_client.put_bucket_encryption(
Bucket=bucket_name,
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': key_id
}
},
]
}
)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def copy_object(self, src_bucket_name, key_prefix, src_object_name, dest_bucket_name, dest_object_name=None):
try:
# Construct source bucket/object parameter
copy_source = {'Bucket': src_bucket_name, 'Key': key_prefix + src_object_name}
if dest_object_name is None:
dest_object_name = src_object_name
self.s3_client.copy_object(CopySource=copy_source, Bucket=dest_bucket_name, Key=dest_object_name)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def put_object(self, dest_bucket_name, dest_object_name, src_data):
try:
# Construct Body= parameter
if isinstance(src_data, str):
object_data = src_data
elif isinstance(src_data, bytes):
object_data = open(src_data, 'rb')
self.s3_client.put_object(Bucket=dest_bucket_name, Key=dest_object_name, Body=object_data)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
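# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# The bucket and key names below are placeholder values.
#
#   from lib.logger import Logger
#   s3 = S3(Logger('info'))
#   s3.upload_file('my-bucket', '/tmp/console-manifest.json', 'console/console-manifest.json')
#   s3.put_object('my-bucket', 'config/stno_config.json', '{"region": "us-east-1"}')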
```
#### File: source/tests/test_tgw_peering_attachment_finder.py
```python
import pytest
from os import environ
from lib.logger import Logger
import tgw_peering_attachment_handler as tgw_pa
logger = Logger('info')
environ['TGW_PEERING_TAG_PREFIX'] = "TgwPeer"
environ['TGW_PEERING_TAG_DELIMITER'] = "Colon (:)"
environ['AWS_REGION'] = 'us-east-1'
peering_attachment_not_found = {
'TgwPeeringAttachmentExist': 'No',
'TgwPeeringAttachmentState': 'does-not-exist'
}
create_event = {
"version": "0",
"id": "748c49490",
"detail-type": "Tag Change on Resource",
"source": "aws.tag",
"account": "11111111",
"time": "2020-07-28T13:43:55Z",
"region": "us-east-1",
"resources": [
"arn:aws:ec2:us-east-1:11111111:transit-gateway/tgw-0a899af2"
],
"detail": {
"changed-tag-keys": [
"TgwPeer:us-east-2:2"
],
"service": "ec2",
"resource-type": "transit-gateway",
"version": 59,
"tags": {
"AWS Solutions": "arn:aws:cloudformation:us-east-1:11111111:stack/stno-v2-va-7/e72317d0-c1f0-11ea-b90c-12cf4a8c2bc2",
"TgwPeer:us-east-2:2": "tgw-031cdc5",
"Name": "STNO-TGW-us-east-1"
}
},
"params": {
"MethodName": "get_transit_gateway_peering_attachment_id"
},
"PeeringTag": "TgwPeer:us-east-2:2",
"IsTgwPeeringTagEvent": "Yes",
"RequestType": "Create",
"TgwId": "tgw-0a899af2",
"PeerTgwId": "tgw-031cdc5",
"PeerRegion": "us-east-2",
"PeerAccountId": "11111111"
}
create_second_peer_attachment_same_region = {
"version": "0",
"id": "2512fc44",
"detail-type": "Tag Change on Resource",
"source": "aws.tag",
"account": "11111111",
"time": "2020-07-28T14:06:00Z",
"region": "us-east-1",
"resources": [
"arn:aws:ec2:us-east-1:11111111:transit-gateway/tgw-0a899af2"
],
"detail": {
"changed-tag-keys": [
"TgwPeer:us-east-2"
],
"service": "ec2",
"resource-type": "transit-gateway",
"version": 60,
"tags": {
"AWS Solutions": "arn:aws:cloudformation:us-east-1:11111111:stack/stno-v2-va-7/e72317d0-c1f0-11ea-b90c-12cf4a8c2bc2",
"TgwPeer:us-east-2": "tgw-0c7e4fe",
"TgwPeer:us-east-2:2": "tgw-031cdc5",
"Name": "STNO-TGW-us-east-1"
}
},
"params": {
"MethodName": "get_transit_gateway_peering_attachment_id"
},
"PeeringTag": "TgwPeer:us-east-2",
"IsTgwPeeringTagEvent": "Yes",
"RequestType": "Create",
"TgwId": "tgw-0a899af2",
"PeerTgwId": "tgw-0c7e4fe",
"PeerRegion": "us-east-2",
"PeerAccountId": "11111111"
}
delete_event = {
"version": "0",
"id": "e5816e33",
"detail-type": "Tag Change on Resource",
"source": "aws.tag",
"account": "11111111",
"time": "2020-07-28T14:21:00Z",
"region": "us-east-1",
"resources": [
"arn:aws:ec2:us-east-1:11111111:transit-gateway/tgw-0a899af2"
],
"detail": {
"changed-tag-keys": [
"TgwPeer:us-east-2:2"
],
"service": "ec2",
"resource-type": "transit-gateway",
"version": 61,
"tags": {
"AWS Solutions": "arn:aws:cloudformation:us-east-1:11111111:stack/stno-v2-va-7/e72317d0-c1f0-11ea-b90c-12cf4a8c2bc2",
"TgwPeer:us-east-2": "tgw-0c7e4fe",
"Name": "STNO-TGW-us-east-1"
}
},
"params": {
"MethodName": "get_transit_gateway_peering_attachment_id"
},
"PeeringTag": "TgwPeer:us-east-2:2",
"IsTgwPeeringTagEvent": "Yes",
"RequestType": "Delete",
"TgwId": "tgw-0a899af2",
"PeerTgwId": "None",
"PeerRegion": "us-east-2",
"PeerAccountId": "11111111"
}
delete_last_attachment_event = {
"version": "0",
"id": "bbcafe51",
"detail-type": "Tag Change on Resource",
"source": "aws.tag",
"account": "11111111",
"time": "2020-07-28T14:41:10Z",
"region": "us-east-1",
"resources": [
"arn:aws:ec2:us-east-1:11111111:transit-gateway/tgw-0a899af2"
],
"detail": {
"changed-tag-keys": [
"TgwPeer:us-east-2"
],
"service": "ec2",
"resource-type": "transit-gateway",
"version": 62,
"tags": {
"AWS Solutions": "arn:aws:cloudformation:us-east-1:11111111:stack/stno-v2-va-7/e72317d0-c1f0-11ea-b90c-12cf4a8c2bc2",
"Name": "STNO-TGW-us-east-1"
}
},
"params": {
"MethodName": "get_transit_gateway_peering_attachment_id"
},
"PeeringTag": "TgwPeer:us-east-2",
"IsTgwPeeringTagEvent": "Yes",
"RequestType": "Delete",
"TgwId": "tgw-0a899af2",
"PeerTgwId": "None",
"PeerRegion": "us-east-2",
"PeerAccountId": "11111111"
}
no_existing_attachment_response = []
existing_attachment_response = [
{
"TransitGatewayAttachmentId": "tgw-attach-0f3c8530",
"RequesterTgwInfo": {
"TransitGatewayId": "tgw-0a899af2",
"OwnerId": "11111111",
"Region": "us-east-1"
},
"AccepterTgwInfo": {
"TransitGatewayId": "tgw-031cdc5",
"OwnerId": "11111111",
"Region": "us-east-2"
},
"Status": {
"Code": "available",
"Message": "Available"
},
"State": "available",
"CreationTime": "2020-07-28T13:43:58+00:00",
"Tags": []
}
]
last_attachment_response = [
{
"TransitGatewayAttachmentId": "tgw-attach-099b887",
"RequesterTgwInfo": {
"TransitGatewayId": "tgw-0a899af2",
"OwnerId": "11111111",
"Region": "us-east-1"
},
"AccepterTgwInfo": {
"TransitGatewayId": "tgw-0c7e4fe",
"OwnerId": "11111111",
"Region": "us-east-2"
},
"Status": {
"Code": "available",
"Message": "Available"
},
"State": "available",
"CreationTime": "2020-07-28T14:06:05+00:00",
"Tags": []
}
]
two_existing_attachment_response = [
{
"TransitGatewayAttachmentId": "tgw-attach-099b887",
"RequesterTgwInfo": {
"TransitGatewayId": "tgw-0a899af2",
"OwnerId": "11111111",
"Region": "us-east-1"
},
"AccepterTgwInfo": {
"TransitGatewayId": "tgw-0c7e4fe",
"OwnerId": "11111111",
"Region": "us-east-2"
},
"Status": {
"Code": "available",
"Message": "Available"
},
"State": "available",
"CreationTime": "2020-07-28T14:06:05+00:00",
"Tags": []
},
{
"TransitGatewayAttachmentId": "tgw-attach-0f3c8530",
"RequesterTgwInfo": {
"TransitGatewayId": "tgw-0a899af2",
"OwnerId": "11111111",
"Region": "us-east-1"
},
"AccepterTgwInfo": {
"TransitGatewayId": "tgw-031cdc5",
"OwnerId": "11111111",
"Region": "us-east-2"
},
"Status": {
"Code": "available",
"Message": "Available"
},
"State": "available",
"CreationTime": "2020-07-28T14:09:05+00:00",
"Tags": []
}
]
def test_create_new_attachment_event():
tgw_client = tgw_pa.TgwTagEventHandler(create_event)
tgw_client.tgw_peering_attachment_id_finder(no_existing_attachment_response,
peering_attachment_not_found)
logger.info(create_event)
assert create_event.get('TgwPeeringAttachmentExist') == 'No'
assert create_event.get('TgwPeeringAttachmentState') == 'does-not-exist'
def test_create_second_new_attachment_event():
tgw_client = tgw_pa.TgwTagEventHandler(create_second_peer_attachment_same_region)
tgw_client.tgw_peering_attachment_id_finder(existing_attachment_response,
peering_attachment_not_found)
logger.info(create_event)
assert create_event.get('TgwPeeringAttachmentExist') == 'No'
assert create_event.get('TgwPeeringAttachmentState') == 'does-not-exist'
def test_create_if_existing_attachment_event():
tgw_client = tgw_pa.TgwTagEventHandler(create_event)
tgw_client.tgw_peering_attachment_id_finder(existing_attachment_response,
peering_attachment_not_found)
logger.info(create_event)
assert create_event.get('TgwPeeringAttachmentExist') == 'Yes'
assert create_event.get('TgwPeeringAttachmentState') == 'available'
assert create_event.get('TgwPeeringAttachmentId') == 'tgw-attach-0f3c8530'
# identify tgw attachment id in deletion workflow
def test_delete_second_attachment_event():
tgw_client = tgw_pa.TgwTagEventHandler(delete_event)
tgw_client.tgw_peering_attachment_id_finder(two_existing_attachment_response,
peering_attachment_not_found)
logger.info(delete_event)
assert delete_event.get('TgwPeeringAttachmentExist') == 'Yes'
assert delete_event.get('TgwPeeringAttachmentState') == 'available'
assert delete_event.get('TgwPeeringAttachmentId') == 'tgw-attach-0f3c8530'
# identify tgw attachment id in deletion workflow
def test_delete_last_attachment_event():
tgw_client = tgw_pa.TgwTagEventHandler(delete_last_attachment_event)
tgw_client.tgw_peering_attachment_id_finder(last_attachment_response,
peering_attachment_not_found)
logger.info(delete_event)
assert delete_last_attachment_event.get('TgwPeeringAttachmentExist') == 'Yes'
assert delete_last_attachment_event.get('TgwPeeringAttachmentState') == 'available'
assert delete_last_attachment_event.get('TgwPeeringAttachmentId') == 'tgw-attach-099b887'
``` |
{
"source": "josethomasd/chatPlus",
"score": 2
} |
#### File: chatPlus/chatPlusPlacesApp/views.py
```python
from flask import render_template
from flask import request
from flask import flash
from flask import redirect
from flask.ext.wtf import Form
from wtforms import StringField, RadioField, SubmitField
from wtforms.validators import DataRequired
from chatPlusPlacesApp import chatPlusPlacesApp
from chatPlusPlacesApp.lib.classes import chatPlusConstants
from chatPlusPlacesApp.lib.classes import chatPlusPlacesJson
#from .forms import searchForm
# index view function suppressed for brevity
class searchForm(Form):
searchString = StringField('searchString', validators=[DataRequired()])
#class searchOption(Form):
# option = request.form['options']
constantsObject = chatPlusConstants.chatPlusConstants
@chatPlusPlacesApp.route('/',methods=['GET', 'POST'])
@chatPlusPlacesApp.route('/index', methods=['GET', 'POST'])
@chatPlusPlacesApp.route('/indexnew', methods=['GET', 'POST'])
def index():
form = searchForm()
if form.validate_on_submit():
searchString = form.searchString.data
print('working')
placesObject = chatPlusPlacesJson.chatPlusPlacesJson()
placeSearch = placesObject.getJSON(searchString)
return render_template('indexnew.html',title='Home',form=form,placeSearch=placeSearch)
#if form.validate_on_submit():
# form = searchOption(request.form)
# searchOption=form.searchOption.data
# placesJson = placesObject.getJSONFinal(searchOption)
# return render_template('index.html',title='Home',form=form,placeJson=placeJson)
return render_template('index.html',title='Home',form=form,placesJson=None)
def indexnew():
    form = searchForm()
    if form.validate_on_submit():
        print('working1')
        searchOption = form.searchString.data
        placesObject = chatPlusPlacesJson.chatPlusPlacesJson()
        placesJson = placesObject.getJSONFinal(searchOption)
        return render_template('indexnew.html', title='Home', form=form, placesJson=placesJson)
    return render_template('index.html', title='Home', form=form, placesJson=None)
``` |
{
"source": "josethomasd/telebot-instagram",
"score": 3
} |
#### File: josethomasd/telebot-instagram/models.py
```python
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
engine = create_engine('sqlite:///mydatabase.db', echo=True)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
insta_id = Column(String, primary_key=True)
username = Column(String)
userid = Column(String)
def __init__(self, insta_id, username, userid):
self.insta_id = insta_id
self.username = username
self.userid = userid
def __repr__(self):
return "<User('%s','%s', '%s')>" % (self.insta_id, self.username, self.userid)
users_table = User.__table__
metadata = Base.metadata
if __name__ == "__main__":
metadata.create_all(engine)
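# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Classic (pre-2.0) SQLAlchemy session workflow for the User model above; the
# id and name values are placeholders.
#
#   from sqlalchemy.orm import sessionmaker
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   session.add(User('17841400000000000', 'some_handle', '123456789'))
#   session.commit()
#   print(session.query(User).filter_by(username='some_handle').first())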
``` |
{
"source": "josethomazini/phaser_games",
"score": 2
} |
#### File: phaser_games/scripts/runserver.py
```python
import os
RUN_SERVER_CMD = 'python -m SimpleHTTPServer 8080'
class RunServer:
def __init__(self):
os.system(RUN_SERVER_CMD)
if __name__ == "__main__":
RunServer()
```
#### File: phaser_games/scripts/startobserver.py
```python
import os
import builder
RUN_INOTIFY_CMD = 'while inotifywait -r . -e modify -e move -e create -e delete; do ../../scripts/builder.py; done'
class StartObserver:
def __init__(self):
builder.Builder()
os.system(RUN_INOTIFY_CMD)
if __name__ == "__main__":
StartObserver()
``` |
{
"source": "josethomazini/python-test-template",
"score": 2
} |
#### File: core/tests/test_calc.py
```python
import pytest
from sources.core import calc
from sources.core.exceptions import DivByZeroException
# Turn off linter hint because of the fixture's syntax.
# pylint: disable=redefined-outer-name
@pytest.fixture
def eleven():
return 11
@pytest.mark.parametrize('n1, n2, res', [
(1, 2, 3),
(4, 5, 9),
(10, 11, 21),
(22, 23, 45),
])
def test_param_adds(n1, n2, res):
result = calc.add(n1, n2)
assert result == res
def test_sub():
result = calc.sub(10, 3)
assert result == 7
def test_add(eleven):
result = calc.add(eleven, 12)
assert result == 23
def test_div():
result = calc.div(10, 2)
assert result == 5
def test_mul():
result = calc.mul(2, 6)
assert result == 12
def test_div_by_0(eleven):
with pytest.raises(DivByZeroException):
calc.div(eleven, 0)
``` |
{
"source": "josetiznado274/astr-119",
"score": 4
} |
#### File: josetiznado274/astr-119/check_in_solution.py
```python
def main():
i = 0 #setting integer to 0
x = 119.0 #declaring the x float
for i in range(120): #loop i from 0 to 119, inclusive
if((i%2)==0): #if i is even
x += 3. # add 3 to x
else: #if i is odd
x -=5. #subtract 5 from x
s = "%3.2e" % x #make a string containing x with
#sci. notation w/ 2 decimal places
print(s) #prints s to the screen
#rest of program continues
if __name__== "__main__": #if the main() function exists, run it
main()
``` |
{
"source": "JoseTomasTocino/AdventOfCode2020",
"score": 3
} |
#### File: day02/code/main.py
```python
import logging
import re
logger = logging.getLogger(__name__)
def get_policy_components(policy):
policy_match = re.match(r"(\d+)-(\d+)\s+([a-zA-Z]+)", policy)
leftr = int(policy_match.group(1))
rightr = int(policy_match.group(2))
char = policy_match.group(3).strip()
return (leftr, rightr, char)
def check_password_list(l):
correct_passwords = 0
for policy, password in [x.split(": ") for x in l.split("\n")]:
logger.info(f"Policy: {policy}, password: {password}")
policy_min, policy_max, policy_char = get_policy_components(policy)
password_char_count = password.count(policy_char)
if policy_min <= password_char_count <= policy_max:
correct_passwords += 1
return correct_passwords
def check_password_list_with_new_policy(l):
correct_passwords = 0
for policy, password in [x.split(": ") for x in l.split("\n")]:
logger.info(f"Policy: {policy}, password: '{password}'")
first_position, second_position, policy_char = get_policy_components(policy)
char_in_first = password[first_position - 1] == policy_char
char_in_second = password[second_position - 1] == policy_char
if char_in_first != char_in_second:
correct_passwords += 1
return correct_passwords
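# Worked example (sample data from the day 2 puzzle statement):
#   puzzle_input = "1-3 a: abcde\n1-3 b: cdefg\n2-9 c: ccccccccc"
#   check_password_list(puzzle_input)                  -> 2 valid passwords
#   check_password_list_with_new_policy(puzzle_input)  -> 1 valid password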
```
#### File: day03/code/main.py
```python
import functools
import logging
import operator
logger = logging.getLogger(__name__)
def get_map_cell(map_template, row, column):
column = column % len(map_template[0])
return map_template[row][column]
def traverse_map(map_string, slope=[1, 3]):
map_template = map_string.split("\n")
num_rows = len(map_template)
current_position = [0, 0]
num_trees = 0
while current_position[0] < num_rows:
cell_type = get_map_cell(map_template, *current_position)
if cell_type == "#":
num_trees += 1
current_position[0] += slope[0]
current_position[1] += slope[1]
return num_trees
def traverse_map_multiple_slopes(map_string, slopes):
found_trees = []
for slope in slopes:
trees = traverse_map(map_string, slope)
logger.info(f"Traversing with slope: {slope}, found trees: {trees}")
found_trees.append(trees)
return functools.reduce(operator.mul, found_trees)
```
#### File: day03/test/test_main.py
```python
import logging
import os.path
from day03.code.main import traverse_map, get_map_cell, traverse_map_multiple_slopes
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
def test_get_map_cell():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert get_map_cell(map_template.split("\n"), 1, 10) == "."
assert get_map_cell(map_template.split("\n"), 1, 10 + 11) == "."
def test_sample_input():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
logger.info(traverse_map(map_template))
def test_sample_input_custom_slope():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
assert traverse_map(map_template, slope=[1, 1]) == 2
assert traverse_map(map_template, slope=[1, 3]) == 7
assert traverse_map(map_template, slope=[2, 1]) == 2
def test_big_input():
with open(os.path.join(local_path, "input"), "r") as f:
found_trees = traverse_map(f.read())
assert found_trees == 237
def test_sample_input_with_multiple_slopes():
map_template = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
tree_product = traverse_map_multiple_slopes(
map_template, [[1, 1], [1, 3], [1, 5], [1, 7], [2, 1]]
)
assert tree_product == 336
def test_big_input_with_multiple_slopes():
with open(os.path.join(local_path, "input"), "r") as f:
tree_product = traverse_map_multiple_slopes(
f.read(), [[1, 1], [1, 3], [1, 5], [1, 7], [2, 1]]
)
assert tree_product == 2106818610
```
#### File: day04/test/test_main.py
```python
import logging
import os.path
from day04.code.main import validate_passports, validate_field
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
sample_input = """ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in"""
invalid_passports_input = """eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007"""
valid_passports_input = """pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
"""
def test_sample_input():
assert validate_passports(sample_input) == 2
def test_big_input():
with open(os.path.join(local_path, "input"), "r") as f:
valid_passports = validate_passports(f.read())
assert valid_passports == 233
logger.info(f"Valid passports: {valid_passports}")
def test_validate_fields():
assert validate_field("byr", "2002")
assert validate_field("byr", "2003") is False
assert validate_field("hgt", "60in")
assert validate_field("hgt", "190cm")
assert validate_field("hgt", "190in") is False
assert validate_field("hgt", "190") is False
assert validate_field("hcl", "#123abc")
assert validate_field("hcl", "#123abz") is False
assert validate_field("hcl", "123abc") is False
assert validate_field("ecl", "brn")
assert validate_field("ecl", "wat") is False
assert validate_field("pid", "000000001")
assert validate_field("pid", "0123456789") is False
def test_validate_passports_with_valid_fields():
assert validate_passports(valid_passports_input, check_fields=True) == 4
def test_validate_passports_with_invalid_fields():
assert validate_passports(invalid_passports_input, check_fields=True) == 0
def test_big_input_checking_fields():
with open(os.path.join(local_path, "input"), "r") as f:
valid_passports = validate_passports(f.read(), check_fields=True)
assert valid_passports == 111
logger.info(f"Valid passports: {valid_passports}")
```
#### File: day06/code/main.py
```python
import logging
logger = logging.getLogger(__name__)
def count_questions_anyone(in_str):
return sum([len(set(x.replace("\n", ""))) for x in in_str.split("\n\n")])
def count_questions_everyone(in_str):
return sum(
[
len(set.intersection(*[set(x) for x in group.split("\n")]))
for group in in_str.split("\n\n")
]
)
```
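Both helpers treat each blank-line-separated block of the input as one group: `count_questions_anyone` unions the answers within a group, while `count_questions_everyone` intersects them. A minimal sanity check (the sample string and import path below are illustrative assumptions, not taken from the repository):
```python
from day06.code.main import count_questions_anyone, count_questions_everyone

sample = "ab\nac\n\nb"  # two groups: ("ab", "ac") and ("b",)
assert count_questions_anyone(sample) == 4     # {a, b, c} plus {b}
assert count_questions_everyone(sample) == 2   # {a} plus {b}
```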
#### File: day07/code/main.py
```python
import logging
import re
from dataclasses import dataclass
from pprint import pformat
logger = logging.getLogger(__name__)
pp = lambda x: logger.info(pformat(x))
@dataclass
class BagNode:
name: str
capacity: map
parents: set
def clean_bag_name(bag_name: str) -> tuple:
"""
Parses a string with a bag description and returns a tuple with the components
:param bag_name: a string describing a bag type
:return: a tuple with two elements: the bag count (or 1 if not specified) and the bag name
>>> clean_bag_name("dark red bags")
(1, 'dark red')
>>> clean_bag_name("2 dark violet bags")
(2, 'dark violet')
"""
if bag_name.startswith("no other"):
return 0, None
match = re.match(r"(\d*)\s*([a-z ]+?)\s*bags?", bag_name.strip())
if not match:
return 0, None
bag_type = match.group(2)
bag_count = 1 if not match.group(1) else int(match.group(1))
return bag_count, bag_type
def parse_bag_rules(in_str):
"""
Parses a string with bag composition rules, creating a tree of BagNode items
"""
bag_nodes = {}
rule_definitions = in_str.split(".\n")
for rule in rule_definitions:
rule_subject, rule_raw_content = rule.split(" contain ")
# Parse rule_subject
_, rule_subject = clean_bag_name(rule_subject)
# Parse rule content
rule_content = [clean_bag_name(x) for x in rule_raw_content.split(",")]
bag_capacity = {x[1]: x[0] for x in rule_content}
if None in bag_capacity:
bag_capacity = {}
bag_nodes[rule_subject] = BagNode(
name=rule_subject, capacity=bag_capacity, parents=set()
)
# Compute parents
for bag_type in bag_nodes:
for bag_possible_parent in bag_nodes:
if bag_type in bag_nodes[bag_possible_parent].capacity:
bag_nodes[bag_type].parents.add(bag_possible_parent)
return bag_nodes
def count_shiny_gold_bag_parents(in_str):
"""
Counts the number of ancestors a shiny gold bag can have
"""
bag_nodes = parse_bag_rules(in_str)
current_node = bag_nodes["shiny gold"]
found_parents = set()
potential_nodes = set()
revised_nodes = set()
while True:
revised_nodes.add(current_node.name)
for node in current_node.parents:
if node not in revised_nodes:
potential_nodes.add(node)
if node not in found_parents:
found_parents.add(node)
if not potential_nodes:
break
current_node = bag_nodes[potential_nodes.pop()]
return len(found_parents)
def count_bag_node_children(bag_nodes, bag_type):
"""
Count how many bags a bag of type bag_type can hold
"""
children_count = 0
for child, child_count in bag_nodes[bag_type].capacity.items():
children_count += child_count + child_count * count_bag_node_children(
bag_nodes, child
)
return children_count
def count_shiny_gold_bag_children(in_str):
"""
Counts how many bags a shiny gold bag holds
"""
bag_nodes = parse_bag_rules(in_str)
return count_bag_node_children(bag_nodes, "shiny gold")
```
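Both entry points build the same `BagNode` graph via `parse_bag_rules`: ancestors are collected iteratively for part 1 and contents are summed recursively for part 2. A tiny illustrative rule set (made up for this sketch, not the puzzle input) shows the expected behaviour:
```python
from day07.code.main import count_shiny_gold_bag_parents, count_shiny_gold_bag_children

rules = ("light red bags contain 2 shiny gold bags, 1 dark olive bag.\n"
         "shiny gold bags contain 3 dark olive bags.\n"
         "dark olive bags contain no other bags.")
assert count_shiny_gold_bag_parents(rules) == 1   # only "light red" can eventually hold it
assert count_shiny_gold_bag_children(rules) == 3  # three dark olive bags inside
```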
#### File: day09/code/main.py
```python
import logging
logger = logging.getLogger(__name__)
def find_bad_number(inp, preamble_size):
numbers = [int(x) for x in inp.split("\n")]
number_count = len(numbers)
preamble_start = 0
preamble_end = preamble_size - 1
while preamble_end + 1 < number_count:
target_number = numbers[preamble_end + 1]
# Find operands in preamble
found_operands = False
for i in range(preamble_start, preamble_end + 1):
for j in range(i + 1, preamble_end + 1):
if numbers[i] + numbers[j] == target_number:
found_operands = True
break
if found_operands:
break
if not found_operands:
return target_number
preamble_start += 1
preamble_end += 1
def find_contiguous_sum_set(inp, target_number):
numbers = [int(x) for x in inp.split("\n")]
number_count = len(numbers)
for i in range(number_count):
set_size = 2
# Keep increasing the window size until the size limit is reached or the sum equals the target number
while i + set_size < number_count:
if sum(numbers[i : i + set_size - 1]) == target_number:
return numbers[i : i + set_size - 1]
set_size += 1
return None
```
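`find_bad_number` slides the preamble window and reports the first value that is not the sum of two numbers inside it; `find_contiguous_sum_set` then searches for a contiguous run adding up to that value. A quick check against the widely known sample from the 2020 puzzle (preamble of 5); the expected values are an assumption based on that sample, not on this repository's input file:
```python
from day09.code.main import find_bad_number, find_contiguous_sum_set

sample = "\n".join(str(n) for n in [35, 20, 15, 25, 47, 40, 62, 55, 65, 95,
                                    102, 117, 150, 182, 127, 219, 299, 277, 309, 576])
bad = find_bad_number(sample, preamble_size=5)
assert bad == 127
window = find_contiguous_sum_set(sample, bad)   # -> [15, 25, 47, 40]
assert min(window) + max(window) == 62          # part 2 answer for this sample
```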
#### File: day10/code/main.py
```python
import logging
from functools import lru_cache
logger = logging.getLogger(__name__)
def parse_adapter_input(adapters):
# Separate by lines, convert to integer, prepend the initial adapter (0) and append the final adapter (max + 3)
adapters = [0] + sorted(int(x) for x in adapters.split("\n") if x)
adapters.append(max(adapters) + 3)
return adapters
def get_adapter_differences(adapters):
# Given all adapters need to be used, this is just a matter of sorting them and computing the differences
adapters = parse_adapter_input(adapters)
adapters_delta = [adapters[i + 1] - adapters[i] for i in range(len(adapters) - 1)]
return adapters_delta
def get_adapter_path_count(adapters):
# Parse and convert adapters to tuple (because lru_cache decorated functions need hashable arguments)
adapters = tuple(parse_adapter_input(adapters))
return get_adapter_path_count_priv(adapters)
@lru_cache()
def get_adapter_path_count_priv(adapters, current=0):
# Get the next adapter indices
next_indices = [x for x in range(current + 1, current + 4) if x < len(adapters)]
# If there are no more indices, we're at base case so return 1
if not next_indices:
return 1
# Otherwise, sum all branches from matching adapters (according to <= 3 criteria)
return sum(
get_adapter_path_count_priv(adapters, i)
for i in next_indices
if adapters[i] - adapters[current] <= 3
)
```
#### File: day10/test/test_main.py
```python
import logging
import os.path
from day10.code.main import get_adapter_differences, get_adapter_path_count
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
sample_input = None
def test_sample_input():
inp = """16
10
15
5
1
11
7
19
6
12
4"""
differences = get_adapter_differences(inp)
assert (differences.count(3) * differences.count(1)) == 35
assert get_adapter_path_count(inp) == 8
def test_sample_input_2():
inp = """28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3"""
differences = get_adapter_differences(inp)
assert (differences.count(3) * differences.count(1)) == 220
assert get_adapter_path_count(inp) == 19208
def test_big_input():
with open(os.path.join(local_path, "input"), "r") as f:
content = f.read()
differences = get_adapter_differences(content)
logger.info(f"Part 1 solution: {differences.count(3) * differences.count(1)}")
logger.info(f"Part 2 solution: {get_adapter_path_count(content)}")
``` |
{
"source": "JoseTomasTocino/cmis_storage",
"score": 3
} |
#### File: cmis_storage/cmis_storage/views.py
```python
import os.path
from django.http import HttpResponse
from cmis_storage.storage import CMISStorage
def get_file(request, path):
"""
Returns a file stored in the CMIS-compatible content management system
:param path: The full path of the file within the CMS
"""
_, filename = os.path.split(path)
storage = CMISStorage()
stream = storage.open_stream(path)
response = HttpResponse()
response['Content-Disposition'] = 'attachment; filename=%s' % filename
response.write(stream.read())
return response
``` |
{
"source": "JoseTomasTocino/toptal-calculator",
"score": 4
} |
#### File: toptal-calculator/calculator/main.py
```python
from calculator import evaluator
def main():
expression = None
notation = None
while expression is None or expression.strip() == '':
expression = input("Please enter the expression: ")
while notation not in ['y', 'n']:
notation = input("Use postfix notation? [y/n]: ")
print("Output:", evaluator.evaluate(expression))
if __name__ == '__main__':
# logging.basicConfig(level=logging.DEBUG)
main()
```
#### File: toptal-calculator/server/server.py
```python
import logging
import os
import sys
from flask import Flask, request, jsonify, current_app
from flask_cors import CORS
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from calculator.evaluator import evaluate
app = Flask(__name__, static_folder='static')
logging.basicConfig(level=logging.DEBUG)
CORS(app)
@app.route("/", methods=['GET'])
def index():
return current_app.send_static_file('index.html')
@app.route("/evaluate", methods=['GET'])
def eval():
expression = request.args.get('expression', '')
notation = request.args.get('notation', 'standard')
try:
retval = {
'expression': expression,
'notation': notation,
'result': evaluate(expression, notation == 'rpn'),
'error': False,
'error_str': ''
}
except RuntimeError as e:
retval = {
'expression': expression,
'notation': notation,
'result': '',
'error': True,
'error_str': str(e)
}
except BaseException as e:
retval = {
'expression': expression,
'notation': notation,
'result': '',
'error': True,
'error_str': "Other error: " + str(e)
}
return jsonify(retval)
if __name__ == "__main__":
app.run(debug=True)
```
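Once the Flask app is running, `/evaluate` takes the expression and the notation as query parameters and returns a JSON envelope with either the result or an error description. A hypothetical client call (host, port and the printed result are assumptions based on the default Flask dev server):
```python
import requests

resp = requests.get("http://127.0.0.1:5000/evaluate",
                    params={"expression": "2 * (1 + 5)", "notation": "standard"})
payload = resp.json()
print(payload["result"], payload["error"])  # e.g. 12 False, depending on the evaluator's return type
```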
#### File: toptal-calculator/test/test_postfix.py
```python
import unittest
from calculator import tokens, evaluator
from calculator.parser import tokenize, infix_to_postfix
class MyTestPostfixCase(unittest.TestCase):
def test_simple_operator(self):
expression = "2 + 1"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.PlusOperatorToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_multiple_operators(self):
expression = "2 + 1 * 5"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.OperandToken(5),
tokens.ProductOperatorToken(),
tokens.PlusOperatorToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_multiple_operators_reversed(self):
expression = "2 * 1 + 5"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.ProductOperatorToken(),
tokens.OperandToken(5),
tokens.PlusOperatorToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_parenthesis(self):
expression = "2 * (1 + 5)"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.OperandToken(5),
tokens.PlusOperatorToken(),
tokens.ProductOperatorToken()
]
self.assertListEqual(postfix_token_list, token_list)
def test_missing_left_parenthesis(self):
expression = "2 * 2) + 1 + 5"
computed_token_list = tokenize(expression)
with self.assertRaises(RuntimeError):
postfix_token_list = infix_to_postfix(computed_token_list)
def test_missing_right_parenthesis(self):
expression = "2 * (1 + 5"
computed_token_list = tokenize(expression)
with self.assertRaises(RuntimeError):
postfix_token_list = infix_to_postfix(computed_token_list)
def test_simple_function(self):
expression = "sin 5"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(5),
tokens.SinFunctionToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_equation_in_postfix_not_allowed(self):
with self.assertRaises(RuntimeError):
evaluator.evaluate('(5 + 2)', True)
with self.assertRaises(RuntimeError):
evaluator.evaluate('x + 1', True)
with self.assertRaises(RuntimeError):
evaluator.evaluate('x = 5', True)
```
#### File: toptal-calculator/test/test_tokens.py
```python
import math
import unittest
from calculator import tokens
class TestTokens(unittest.TestCase):
def test_print_token(self):
token = tokens.PlusOperatorToken()
self.assertEqual(str(token), "Token: PlusOperatorToken")
self.assertEqual(repr(token), "Token: PlusOperatorToken")
token = tokens.LogFunctionToken(has_custom_base=False)
self.assertEqual(str(token), "Token: LogFunctionToken (10-base)")
token = tokens.LogFunctionToken(has_custom_base=True)
self.assertEqual(str(token), "Token: LogFunctionToken (Custom base)")
token = tokens.OperandToken(5)
self.assertEqual(str(token), "Token: Operand (5)")
token = tokens.VariableToken('x')
self.assertEqual(str(token), "Token: Variable (x)")
def test_token_equality(self):
t0 = tokens.PlusOperatorToken()
t1 = tokens.PlusOperatorToken()
self.assertEqual(t0, t1)
t1 = tokens.MinusOperatorToken()
self.assertNotEqual(t0, t1)
def test_ctan_operation(self):
token = tokens.CtanFunctionToken()
self.assertAlmostEqual(token.oper(math.pi / 2), 1 / math.tan(math.pi / 2))
``` |
{
"source": "josetorrs/CS4400",
"score": 4
} |
#### File: josetorrs/CS4400/backend.py
```python
from afinn import Afinn
from matplotlib.figure import Figure
from matplotlib import rcParams
from pandas import DataFrame
from sqlite3 import connect
from twitterscraper.query import query_tweets
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'F0F0F0'})
def backend(scrape, topic, begin_date, end_date, min_likes, min_retweets):
"""
Create database tables if they don't exist, insert query entry and return an
analysis of corresponding tweets
    :param scrape: if True, scrape fresh tweets from Twitter before running the analysis
    :param topic: user-inputted keyword
    :param begin_date: start of the date range to analyse
    :param end_date: end of the date range to analyse
:param min_likes: 0 if no user input
:param min_retweets: 0 if no user input
:return: sentiment analysis of tweets
"""
if scrape is True:
tweets = scrape_tweets(query=topic, begin_date=begin_date, end_date=end_date)
else:
tweets = None
with connect('database.db') as connection:
create_tables(connection=connection)
query_id = insert_query(connection=connection, query=(topic, begin_date, end_date, min_likes, min_retweets))
if tweets is not None:
insert_tweets(connection=connection, tweets=tweets)
insert_sampled(connection=connection, query_id=query_id)
return analyze_tweets(connection=connection, query_id=query_id)
def scrape_tweets(query, begin_date, end_date):
"""
:param query: user input query
:param begin_date:
:param end_date:
:return: None if no matching keywords else pandas dataframe of tweets
"""
limit = None
lang = 'english'
filters = ['tweet_id', 'text', 'timestamp', 'likes', 'retweets', 'user_id', 'screen_name']
tweets = query_tweets(query, limit=limit, lang=lang, begindate=begin_date, enddate=end_date)
if len(tweets) > 0:
data_frame = DataFrame(tweet.__dict__ for tweet in tweets)[filters]
        data_frame = data_frame.dropna()  # dropna returns a new frame, so keep the cleaned copy
return data_frame
else:
return None
def create_tables(connection):
"""
Creates database tables for schema
:param connection: database connection
:return: created database tables
"""
cursor = connection.cursor()
cursor.executescript("CREATE TABLE IF NOT EXISTS Query (\n"
" QueryId INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n"
" Stamp DATETIME NOT NULL DEFAULT (DATETIME('now', 'utc')),\n"
" Topic TEXT NOT NULL,\n"
" StartDate DATE NOT NULL,\n"
" EndDate DATE NOT NULL,\n"
" MinLikes INTEGER NOT NULL CHECK (MinLikes >= 0),\n"
" MinRetweets INTEGER NOT NULL CHECK (MinRetweets >= 0)\n"
");\n"
"CREATE TABLE IF NOT EXISTS Handle (\n"
" HandleId INTEGER NOT NULL PRIMARY KEY,\n"
" Username TEXT NOT NULL UNIQUE\n"
");\n"
"CREATE TABLE IF NOT EXISTS Tweet (\n"
" TweetId INTEGER NOT NULL PRIMARY KEY,\n"
" Post TEXT NOT NULL,\n"
" Sentiment REAL NOT NULL,\n"
" Stamp DATETIME NOT NULL,\n"
" NumLikes INTEGER NOT NULL CHECK (NumLikes >= 0),\n"
" NumRetweets INTEGER NOT NULL CHECK (NumRetweets >= 0),\n"
" HandleId INTEGER NOT NULL,\n"
" FOREIGN KEY (HandleId) REFERENCES Handle(HandleId)\n"
");\n"
"CREATE TABLE IF NOT EXISTS Sampled (\n"
" QueryId INTEGER,\n"
" TweetId INTEGER,\n"
" FOREIGN KEY (QueryId) REFERENCES Query(QueryId),\n"
" FOREIGN KEY (TweetId) REFERENCES Tweet(TweetId)\n"
");")
def insert_query(connection, query):
"""
Inserts a query into the corresponding database table
:param connection: database connection
:param query: user query
:return: last row of the table
"""
cursor = connection.cursor()
sql = ("INSERT INTO Query(Topic, StartDate, EndDate, MinLikes, MinRetweets)\n"
"VALUES(?, ?, ?, ?, ?);")
values = query
cursor.execute(sql, values)
return cursor.lastrowid
def insert_tweets(connection, tweets):
"""
Inserts tweets into a database connection
:param connection: database connection
:param tweets: list of tweets
:return: None
"""
cursor = connection.cursor()
analysis = Afinn()
for _, tweet in tweets.iterrows():
sql = ("INSERT INTO Handle(HandleId, Username)\n"
"VALUES(?, ?);")
values = (tweet['user_id'], tweet['screen_name'])
try:
cursor.execute(sql, values)
except:
pass # repeat entry
sentiment = analysis.score(tweet['text'])
stamp = tweet['timestamp'].to_pydatetime()
sql = ("INSERT INTO Tweet(TweetId, Post, Sentiment, Stamp, NumLikes, NumRetweets, HandleId)\n"
"VALUES(?, ?, ?, ?, ?, ?, ?);")
values = (tweet['tweet_id'], tweet['text'], sentiment, stamp,
tweet['likes'], tweet['retweets'], tweet['user_id'])
try:
cursor.execute(sql, values)
except:
pass # repeat entry
def insert_sampled(connection, query_id):
"""
Inserts query and its info into the database connection
:param connection: database connection (sqlite3)
:param query_id:
:return: new data inserted into database
"""
cursor = connection.cursor()
sql = ("INSERT INTO Sampled(QueryId, TweetId)\n"
"SELECT ?, TweetId\n"
"FROM Tweet, Query\n"
"WHERE QueryId = ?\n"
"AND LOWER(Post) LIKE ('%' || LOWER(Topic) || '%')\n"
"AND DATE(Tweet.Stamp) >= StartDate\n"
"AND DATE(Tweet.Stamp) <= EndDate\n"
"AND NumLikes >= MinLikes\n"
"AND NumRetweets >= MinRetweets;")
values = (query_id, query_id)
cursor.execute(sql, values)
def analyze_tweets(connection, query_id):
"""
Analyze tweets based on sentiment then produces corresponding graphs
:param connection: database connection
:param query_id: tweet query
:return: list of sentiment graph
"""
cursor = connection.cursor()
analysis = {}
sql = ("SELECT SampleSize, AvgSentiment, NumPositive, NumNegative FROM"
"\n"
"(SELECT COUNT(Sentiment) AS SampleSize, AVG(Sentiment) AS AvgSentiment\n"
"FROM Sampled AS S\n"
"JOIN Query AS Q ON Q.QueryId = S.QueryId\n"
"JOIN Tweet AS T ON T.TweetId = S.TweetId\n"
"WHERE Q.QueryId = ?\n"
"GROUP BY Q.QueryId)"
",\n"
"(SELECT COUNT(Sentiment) AS NumPositive\n"
"FROM Sampled AS S\n"
"JOIN Query AS Q ON Q.QueryId = S.QueryId\n"
"JOIN Tweet AS T ON T.TweetId = S.TweetId\n"
"WHERE Q.QueryId = ?\n"
"AND Sentiment > 0\n"
"GROUP BY Q.QueryId)"
",\n"
"(SELECT COUNT(Sentiment) AS NumNegative\n"
"FROM Sampled AS S\n"
"JOIN Query AS Q ON Q.QueryId = S.QueryId\n"
"JOIN Tweet AS T ON T.TweetId = S.TweetId\n"
"WHERE Q.QueryId = ?\n"
"AND Sentiment < 0\n"
"GROUP BY Q.QueryId);")
values = (query_id, query_id, query_id)
cursor.execute(sql, values)
result = cursor.fetchone()
if result is None:
analysis['sample size'] = '0'
analysis['sentiment'] = '0'
analysis['positive'] = '0%'
analysis['negative'] = '0%'
analysis['figure 0'] = Figure()
analysis['figure 1'] = Figure()
analysis['figure 2'] = Figure()
return analysis
analysis['sample size'] = result[0]
analysis['sentiment'] = f'{result[1]:.3f}'
analysis['positive'] = f"{(result[2] / result[0]):.3f}%"
analysis['negative'] = f"{(result[3] / result[0]):.3f}%"
sql = ("SELECT Sentiment, COUNT(Sentiment)\n"
"FROM Sampled AS S\n"
"JOIN Query AS Q ON Q.QueryId = S.QueryId\n"
"JOIN Tweet AS T ON T.TweetId = S.TweetId\n"
"WHERE Q.QueryId = ?\n"
"GROUP BY Sentiment;"
,
"SELECT STRFTIME('%Y', T.Stamp) AS Year, AVG(Sentiment)\n"
"FROM Sampled AS S\n"
"JOIN Query AS Q ON Q.QueryId = S.QueryId\n"
"JOIN Tweet AS T ON T.TweetId = S.TweetId\n"
"WHERE Q.QueryId = ?\n"
"GROUP BY Year;"
,
"SELECT (NumLikes + NumRetweets), Sentiment\n"
"FROM Sampled AS S\n"
"JOIN Query AS Q ON Q.QueryId = S.QueryId\n"
"JOIN Tweet AS T ON T.TweetId = S.TweetId\n"
"WHERE Q.QueryId = ?;")
values = (query_id,)
title = ('Sentiment Distribution', 'Sentiment Over Time', 'Sentiment vs Popularity')
x_label = ('sentiment', 'year', 'popularity (likes + retweets)')
y_label = ('tweets', 'sentiment', 'sentiment')
for i in range(3):
cursor.execute(sql[i], values)
result = cursor.fetchall()
figure = Figure()
subplot = figure.add_subplot()
if i == 0:
subplot.bar(*zip(*result))
elif i == 1:
subplot.plot(*zip(*result))
figure.autofmt_xdate()
else:
subplot.scatter(*zip(*result))
subplot.title.set_text(title[i])
subplot.set_xlabel(x_label[i])
subplot.set_ylabel(y_label[i])
analysis[f"figure {i}"] = figure
return analysis
``` |
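The `backend` entry point ties the pieces together: with `scrape=False` it skips Twitter entirely, (re)creates the schema, records the query and analyses whatever tweets are already stored in `database.db`. A minimal illustrative call (keyword, dates and the import path are assumptions for this sketch):
```python
from datetime import date
from backend import backend

report = backend(scrape=False, topic="python", begin_date=date(2020, 1, 1),
                 end_date=date(2020, 12, 31), min_likes=0, min_retweets=0)
print(report["sample size"], report["sentiment"], report["positive"], report["negative"])
```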
{
"source": "josetorrs/thee-flying-chicken",
"score": 3
} |
#### File: NLTK/tutorial/short_analysis.py
```python
import nltk
import pickle
import random
from nltk.classify.scikitlearn import SklearnClassifier
#from nltk.corpus import movie_reviews
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.linear_model import LogisticRegression,SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from nltk.tokenize import word_tokenize
from nltk.classify import ClassifierI
from statistics import mode
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self.classifiers = classifiers
def classify(self, features):
votes = []
for c in self.classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self.classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes/len(votes)
return conf
short_pos = open("/Users/danielconway/thee-flying-chicken/src/NLTK/tutorial/short_reviews/positive.txt", "r").read()
short_neg = open("/Users/danielconway/thee-flying-chicken/src/NLTK/tutorial/short_reviews/negative.txt", "r").read()
documents = []
for r in short_pos.split("\n"):
documents.append((r, "pos"))
for r in short_neg.split("\n"):
documents.append((r, "neg"))
all_words = []
short_pos_words = word_tokenize(short_pos)
short_neg_words = word_tokenize(short_neg)
for w in short_pos_words:
all_words.append(w.lower())
for w in short_neg_words:
all_words.append(w.lower())
# word:frequency in order
all_words = nltk.FreqDist(all_words)
'''
# prints 15 most common words
print(all_words.most_common(15))
# prints # of occurrences
print(all_words["stupid"])
'''
word_features = list(all_words.keys())[:5000]
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
#print((find_features(movie_reviews.words('neg/cv000_29416.txt'))))
featuresets = [(find_features(rev), catagory) for (rev,catagory) in documents]
random.shuffle(featuresets)
# if you disable the random shuffle and test against only one half, you will only test against positive or negative reviews (the first and last halves)
# 10,000 and something feature sets
training_set = featuresets[:10000]
testing_set = featuresets[10000:]
# posterior = prior occurrences * likelihood / current evidence
#classifier = nltk.NaiveBayesClassifier.train(training_set)
classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Original Naive Bayes Classifier Accuracy Percent: ", nltk.classify.accuracy(classifier, testing_set)*100 )
classifier.show_most_informative_features(15)
'''
### pickling ###
save_classifier = open("_name_.pickle", "wb")
pickle.dump(classifier, save_classifier)
save_classifier.close()
##loading##
pickle_in = open('_name_.pickle','rb')
new_variable = pickle.load(pickle_in)
'''
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)
'''
GaussianNB_classifier = SklearnClassifier(GaussianNB())
GaussianNB_classifier.train(training_set)
print("GaussianNB_classifier accuracy percent:", (nltk.classify.accuracy(GaussianNB_classifier, testing_set))*100)
'''
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
print("BernoulliNB_classifier Accuracy Percent: ", nltk.classify.accuracy(BernoulliNB_classifier, testing_set)*100 )
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("LogisticRegression_classifier accuracy percent:", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(training_set)
print("SGDClassifier_classifier accuracy percent:", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set))*100)
SVC_classifier = SklearnClassifier(SVC())
SVC_classifier.train(training_set)
print("SVC_classifier accuracy percent:", (nltk.classify.accuracy(SVC_classifier, testing_set))*100)
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("LinearSVC_classifier accuracy percent:", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)
NuSVC_classifier = SklearnClassifier(NuSVC())
NuSVC_classifier.train(training_set)
print("NuSVC_classifier accuracy percent:", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100)
voted_classifier = VoteClassifier(classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier,
SVC_classifier,
LinearSVC_classifier,
NuSVC_classifier)
print("voted_classifier accuracy percent:", (nltk.classify.accuracy(voted_classifier, testing_set))*100)
print("Classification: ", voted_classifier.classify(testing_set[0][0]), "Confidence %: " , voted_classifier.confidence(testing_set[0][0])*100)
print("Classification: ", voted_classifier.classify(testing_set[1][0]), "Confidence %: " , voted_classifier.confidence(testing_set[1][0])*100)
print("Classification: ", voted_classifier.classify(testing_set[2][0]), "Confidence %: " , voted_classifier.confidence(testing_set[2][0])*100)
print("Classification: ", voted_classifier.classify(testing_set[3][0]), "Confidence %: " , voted_classifier.confidence(testing_set[3][0])*100)
print("Classification: ", voted_classifier.classify(testing_set[4][0]), "Confidence %: " , voted_classifier.confidence(testing_set[4][0])*100)
print("Classification: ", voted_classifier.classify(testing_set[5][0]), "Confidence %: " , voted_classifier.confidence(testing_set[5][0])*100)
``` |
{
"source": "jose-turintech/mlflow-aws-custom-image-build",
"score": 2
} |
#### File: mlflow_custom_serving/conf/app_conf.py
```python
from pydantic import Field
from mlflow_custom_serving.conf.base_default_conf import BaseDefaultConf, conf_factory
# ───────────────────────────────────────────────────────────────────────────────────────────── #
# APP Configuration #
# ───────────────────────────────────────────────────────────────────────────────────────────── #
class AppConf(BaseDefaultConf):
"""
This class contains the configuration attributes of the application
The attributes of this class are updated with the values of the environment variables.
"""
app_env: str = Field(None, description="Name of the configured deployment environment")
app_group: str = Field('turintech', description="Name of the group to which the application belongs.")
app_name: str = Field('mlflow_custom_serving', description="Application name")
app_version: str = Field('0.0.0', description="Application version")
app_id: str = Field(None, description="Name that identifies the deployed Application")
# ───────────────────────────────────────────────────────────────────────────────────────────── #
# APP Configuration Factory #
# ───────────────────────────────────────────────────────────────────────────────────────────── #
def app_conf_factory(_env_file: str = '.env', prefix: str = None, defaults: dict = None, **kwargs) -> AppConf:
"""
This is a factory generating an AppConf class specific to a service, loading every value from a generic
.env file storing variables in uppercase with a service prefix.
example .env:
PREFIX_APP_ENV='DEV'
PREFIX_APP_VERSION='1.0.0'
...
"""
return conf_factory(config_class=AppConf, _env_file=_env_file, prefix=prefix, defaults=defaults, **kwargs)
```
#### File: mlflow_custom_serving/utils/file_utils.py
```python
import json
from pathlib import Path
from typing import List, Dict, Callable, Union, Any
from pydantic import BaseModel
from mlflow_custom_serving.utils.data_utils import JsonType
# ───────────────────────────────────────────────────────────────────────────────────────────── #
# JSON File utils #
# ───────────────────────────────────────────────────────────────────────────────────────────── #
def read_json(file_path: str or Path) -> JsonType:
""" Return a JSON file data """
if not Path(file_path).exists():
raise FileNotFoundError(file_path)
with open(str(file_path), encoding='utf-8') as file:
return json.load(file)
def read_json_as_data_model(
file_path: str or Path, data_model: Callable[[Union[Any, BaseModel]], BaseModel]
) -> List[BaseModel] or BaseModel:
"""
Read a JSON file and return the information in the indicated data structure
:param file_path: JSON file path
:param data_model: (Callable[..., BaseModel]) Class type inheriting from BaseModel to instantiate.
:return: data_model type
"""
data = read_json(file_path=file_path)
return data_model(**data) if isinstance(data, dict) else json_as_data_model_list(data=data, data_model=data_model)
def json_as_data_model_list(
data: List[Dict], data_model: Callable[[Union[Any, BaseModel]], BaseModel]
) -> List:
""" Return the information in the indicated data structure """
return list(map(lambda value: data_model(**value), data))
```
#### File: tests_mlflow_custom_serving/tests_conf/test_conf_manager.py
```python
import pytest
from mlflow_custom_serving.conf.conf_manager import conf_mgr, ConfManager
from mlflow_custom_serving.conf.data_conf import DataConf
from mlflow_custom_serving.conf.logger_conf import FileLoggerConf
from tests_mlflow_custom_serving.base.base_test import BaseTest
from tests_mlflow_custom_serving.conftest import ROOT_PATH, TESTS_ENV_PATH, data_mgr
# ───────────────────────────────────────────────────────────────────────────────────────────── #
# Test Class #
# ───────────────────────────────────────────────────────────────────────────────────────────── #
class TestConfManager(BaseTest):
"""
Configuration Manager testing class
"""
def test_init(self):
"""
Validation of the ConfManager.__init__
"""
conf1 = ConfManager()
conf2 = ConfManager(env_file=TESTS_ENV_PATH)
self.case.assertEqual(str(ROOT_PATH.joinpath(*['deploy', '.env'])), conf1.env_file)
self.case.assertEqual(str(TESTS_ENV_PATH), conf2.env_file)
def test_update_conf_mgr(self):
"""
Validation of the ConfManager.update_conf_mgr
"""
conf = ConfManager()
self.case.assertEqual(str(ROOT_PATH.joinpath(*['deploy', '.env'])), conf.env_file)
conf.update_conf_mgr(env_file=str(TESTS_ENV_PATH))
self.case.assertEqual(str(TESTS_ENV_PATH), conf.env_file)
conf.update_conf_mgr(env_file='.env')
self.case.assertIsNone(conf.env_file)
def test_app_paths(self):
"""
Validation of the ConfManager path configurations
"""
expected = ROOT_PATH
self.case.assertEqual(expected, conf_mgr.path_root, "path_root")
expected = ROOT_PATH.joinpath('deploy')
self.case.assertEqual(expected, conf_mgr.path_deploy, "path_deploy")
expected = ROOT_PATH.joinpath('src')
self.case.assertEqual(expected, conf_mgr.path_src, "path_src")
expected = expected.joinpath('mlflow_custom_serving')
self.case.assertEqual(expected, conf_mgr.path_app, "path_app")
expected = expected.joinpath('conf')
self.case.assertEqual(expected, conf_mgr.path_conf, "path_conf")
def test_logging_conf(self):
"""
Validation of the logging_conf property
"""
expected = FileLoggerConf(_env_file=TESTS_ENV_PATH, sink=conf_mgr.defaults_logging_conf.get('sink'),
level="debug", defaults=dict(unknown="unknown")).dict()
self.case.assertDictEqual(expected, conf_mgr.logging_conf.dict())
conf_mgr._logging_conf = None
self.case.assertDictEqual(expected, conf_mgr.logging_conf.dict())
def test_data_conf(self):
"""
Validation of the data_conf property
"""
expected = DataConf(_env_file=TESTS_ENV_PATH, data_path=data_mgr.data_path,
defaults=dict(unknown="unknown")).dict()
self.case.assertDictEqual(expected, conf_mgr.data_conf.dict())
conf_mgr._data_conf = None
self.case.assertDictEqual(expected, conf_mgr.data_conf.dict())
# ───────────────────────────────────────────────────────────────────────────────────────────── #
if __name__ == "__main__":
pytest.main()
``` |
{
"source": "jose-turintech/mlflow-turing-scoring-server",
"score": 2
} |
#### File: src/mlflow_turing_scoring_server/main.py
```python
from datetime import datetime
from time import sleep
from loguru import logger
from mlflow_turing_scoring_server.conf.conf_manager import conf_mgr
# ───────────────────────────────────────────────────────────────────────────────────────────── #
def print_conf():
logger.info("Configuration Manager")
logger.info(f" - path_root : {conf_mgr.path_root}")
logger.info(f" - path_src : {conf_mgr.path_src}")
logger.info(f" - path_app : {conf_mgr.path_app}")
logger.info(f" - path_conf : {conf_mgr.path_conf}")
logger.info(f" - env_file : {conf_mgr.env_file}")
def main():
while True:
sleep(5)
logger.info(datetime.now())
if __name__ == '__main__':
print_conf()
main()
``` |
{
"source": "joseuscamayta/joseuscamayta.github.io",
"score": 3
} |
#### File: joseuscamayta.github.io/files/web_scraping_tasas_sbs.py
```python
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
website='https://www.sbs.gob.pe/app/pp/CurvaSoberana/Curvas_Consulta_Historica.asp'
path='C:/Users/JOSE/Downloads/chromedriver_win32/chromedriver'
driver = webdriver.Chrome(path)
driver.get(website)
fechas_cc = ['15/08/2006', '30/11/2005']
def extraer_tasas():
for i in range(0,2):
driver = webdriver.Chrome(path)
driver.get(website)
dropdown= Select(driver.find_element_by_id('as_tip_curva'))
dropdown.select_by_visible_text('Curva Soberana Soles')
dropdown= Select(driver.find_element_by_id('as_fec_cons'))
dropdown.select_by_visible_text(fechas_cc[i])
boton_consultar=driver.find_element_by_xpath("//*[@id='Consultar']")
boton_consultar.click()
time.sleep(10)
driver.close()
extraer_tasas()
``` |
{
"source": "Josevaldo10/pythonbirds",
"score": 4
} |
#### File: pythonbirds/oo/pessoa.py
```python
class Pessoa:
olhos = 2
def __init__(self,*filhos, nome=None, idade=40):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimetar(self):
return f'Olá meu nome é {self.nome}'
@staticmethod
def metodo_statico():
return 42
@classmethod
def nome_e_atributo_de_classe(cls):
return f'{cls} - olhos {cls.olhos}'
class Homem(Pessoa):
def cumprimetar(self):
cumprimentar_da_classe=super().cumprimetar()
return f'{cumprimentar_da_classe}.Aperto de mão!'
class Mutante(Pessoa):
olhos = 3
if __name__ == '__main__':
josevaldo = Mutante(nome='Josevaldo')
joviniano = Homem(josevaldo, nome='Joviniano')
print(Pessoa.cumprimetar(joviniano))
print(id(joviniano))
print(joviniano.cumprimetar())
print(joviniano.nome)
print(joviniano.idade)
for filho in joviniano.filhos:
print(filho.nome)
joviniano.sobrenome = 'Pereira'
del joviniano.filhos
joviniano.olhos = 1
del joviniano.olhos
print(joviniano.__dict__)
print(josevaldo.__dict__)
print(Pessoa.olhos)
print(joviniano.olhos)
print(josevaldo.olhos)
print(id(Pessoa.olhos)),print(id(joviniano.olhos)), print(id(josevaldo.olhos))
print(Pessoa.metodo_statico(), joviniano.metodo_statico())
print(Pessoa.nome_e_atributo_de_classe(), joviniano.nome_e_atributo_de_classe())
pessoa = Pessoa('Anonimo')
print(isinstance(pessoa, Homem))
print(isinstance(josevaldo, Homem))
print(isinstance(josevaldo, Pessoa))
print(josevaldo.olhos)
print(joviniano.cumprimetar())
print(josevaldo.cumprimetar())
``` |
{
"source": "josevga/adventofcode",
"score": 4
} |
#### File: adventofcode/2020/day07-1.py
```python
filename = "day07-rules.txt"
# filename = "day07-examples.txt"
with open(filename, "r") as f:
lines = [line.rstrip(".\n") for line in f.readlines()]
print(f"Input {filename=} with {len(lines)=}")
sep1 = " bags contain "
sep2 = ", "
my_bag = "shiny gold"
rules = {k: set([color.rstrip("bags").rstrip("bag").strip()
for num, color in [item.split(" ", 1)
for item in v.split(sep2)]])
for k, v in (line.split(sep1) for line in lines)}
def check(color, my_color):
# print(f"Cheking: {color} : {rules[color]}")
if rules[color] == {'other'}:
return False
if my_color in rules[color]:
return True
return any(check(inside_color, my_color) for inside_color in rules[color])
print(sum(
check(bag, my_bag) for bag in rules.keys()
))
```
#### File: adventofcode/2020/day07-2.py
```python
filename = "day07-rules.txt"
# filename = "day07-examples2.txt"
with open(filename, "r") as f:
lines = [line.rstrip(".\n") for line in f.readlines()]
print(f"Input {filename=} with {len(lines)=}")
sep1 = " bags contain "
sep2 = ", "
my_bag = "shiny gold"
rules = {k: [[color.rstrip("bags").rstrip("bag").strip(),
int(num) if num != "no" else 0]
for num, color in [item.split(" ", 1)
for item in v.split(sep2)]]
for k, v in (line.split(sep1) for line in lines)}
# print(rules)
def total_bags(color):
print(f"Cheking: {color} : {rules[color]}")
if len(rules[color]) == 1 and rules[color][0][1] == 0:
return 0
sum_bags = sum(item[1] for item in rules[color])
print(sum_bags)
return sum_bags + sum(item[1] * total_bags(item[0])
for item in rules[color])
print(total_bags(my_bag))
``` |
{
"source": "josevictorp81/converter",
"score": 4
} |
#### File: temperature/utils/newton.py
```python
def convert_newton_to(temperature_to: str, amount: float):
if temperature_to == 'celsiu(s)':
value = amount * (100 / 33)
if temperature_to == 'fahrenheit(s)':
value = amount * (60 / 11) + 32
if temperature_to == 'kelvin(s)':
value = amount * (100 / 33) + 273.15
if temperature_to == 'rankine(s)':
value = amount * (60 / 11) + 491.67
if temperature_to == 'reaumur(s)':
value = amount * (80 / 33)
if temperature_to == 'rømer(s)':
value = amount * (35 / 22) + 7.5
if temperature_to == 'newton(s)':
value = amount
if temperature_to == 'delisle(s)':
value = (33 - amount) * (50 / 11)
return value
```
#### File: temperature/utils/rankine.py
```python
def convert_rankine_to(temperature_to: str, amount: float):
if temperature_to == 'celsiu(s)':
value = amount * 0.55 - 273.15
if temperature_to == 'fahrenheit(s)':
value = amount - 459.67
if temperature_to == 'kelvin(s)':
value = amount * 0.55
if temperature_to == 'rankine(s)':
value = amount
if temperature_to == 'reaumur(s)':
value = (amount - 491.67) * 0.44
if temperature_to == 'rømer(s)':
value = (amount - 491.67) * (7 / 24) + 7.5
if temperature_to == 'newton(s)':
value = (amount - 491.67) * (11 / 60)
if temperature_to == 'delisle(s)':
value = (671.67 - amount) * (5 / 6)
return value
```
#### File: temperature/utils/romer.py
```python
def convert_romer_to(temperature_to: str, amount: float):
if temperature_to == 'celsiu(s)':
value = (amount - 7.5) * (40 / 21)
if temperature_to == 'fahrenheit(s)':
value = (amount - 7.5) * (24 / 7) + 32
if temperature_to == 'kelvin(s)':
value = (amount - 7.5) * (40 / 21) + 273.15
if temperature_to == 'rankine(s)':
value = (amount - 7.5) * (40 / 21) + 491.67
if temperature_to == 'reaumur(s)':
value = (amount - 7.5) * (32 / 21)
if temperature_to == 'rømer(s)':
value = amount
if temperature_to == 'newton(s)':
value = (amount - 7.5) * (22 / 35)
if temperature_to == 'delisle(s)':
value = (60 - amount) * (20 / 7)
return value
``` |
{
"source": "josevictorp81/Ordenacao-e-Busca",
"score": 4
} |
#### File: Ordenacao-e-Busca/Ordenacao/bubbleSort.py
```python
def bubble(v):
    for i in range(len(v)):  # traverse the whole list
        for j in range(len(v) - 1):  # sweep the list on every pass
            if v[j] > v[j + 1]:  # compare each adjacent pair
                v[j], v[j + 1] = v[j + 1], v[j]  # swap if the next element is smaller than the current one
vet = [9, 1, 2, 7, 6, 0, 5, 11, 12, 8, 3, 4, 10]
bubble(vet)
print(vet)
```
#### File: Ordenacao-e-Busca/Pesquisa/pesquisaSequencial.py
```python
def pesquisa(vetor, chave):
for i in range(len(vetor)):
#print(i)
if vetor[i] == chave:
return i
return None
vet = [2, 4, 6, 1, 9, 10]
ch = int(input('Chave: '))
p = pesquisa(vet, ch)
if p is None:
print('Item buscado nao esta na lista')
else:
print('A chave {} esta na posicao {} da lista'.format(ch, p))
``` |
{
"source": "josevictorp81/Quiz",
"score": 2
} |
#### File: Quiz/auth/serializers.py
```python
from rest_framework import serializers
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
class Meta:
extra_kwargs = {
'password': {'write_only': True}
}
model = User
fields = ['id', 'username', 'password']
def validate(self, attrs):
user = User.objects.filter(username=attrs['username']).first()
if user:
            raise serializers.ValidationError(detail='User with this username already exists!')
return super().validate(attrs)
def create(self, validated_data):
user = User.objects.create(username=validated_data['username'])
user.set_password(validated_data['password'])
user.save()
return user
``` |
{
"source": "josevictorp81/Social-Login",
"score": 3
} |
#### File: Social-Login/authentication/forms.py
```python
from django import forms
from django.contrib.auth import get_user_model
from .models import User
User = get_user_model()
class UserCreateForm(forms.ModelForm):
password = forms.CharField(min_length=8)
class Meta:
model = User
fields = ['first_name', 'last_name', 'email', 'password']
def clean_email(self):
email = self.cleaned_data['email']
queryset = User.objects.filter(email=email)
if queryset.exists():
raise forms.ValidationError("email ja existe!")
return email
def save(self, commit: bool = True):
user = super(UserCreateForm, self).save(commit=False)
user.set_password(self.cleaned_data["password"])
user.save()
return user
```
#### File: Social-Login/authentication/models.py
```python
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractUser
class CustomUserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
if not email:
raise ValueError('Email nao informado')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
        if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser precisa ter is_superuser=True')
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser precisa ter is_staff=True')
if extra_fields.get('is_active') is not True:
raise ValueError('Superuser precisa ter is_active=True')
return self.create_user(email, password, **extra_fields)
class User(AbstractUser):
email = models.EmailField(max_length=50, unique=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
object = CustomUserManager()
def __str__(self) -> str:
return self.email
def save(self, *args, **kwargs):
self.username = self.first_name
return super().save(*args, **kwargs)
``` |
{
"source": "josevictorp81/Tasks",
"score": 2
} |
#### File: tasks/core/models.py
```python
from django.db import models
from django.contrib.auth import get_user_model
class Task(models.Model):
STATUS = (
('Doing', 'Doing'),
('Done', 'Done'),
)
title = models.CharField(max_length=150)
description = models.TextField()
done = models.CharField(max_length=5, choices=STATUS)
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
``` |
{
"source": "josevictorp81/Uri-questions-solutions",
"score": 4
} |
#### File: Uri-questions-solutions/python/1094.py
```python
def percentual(qtde, total):
return (100 * qtde) / total
n = int(input())
c = r = s = 0
for i in range(n):
val, tipo = input().split()
qtde = int(val)
if tipo == 'C':
c += qtde
if tipo == 'R':
r += qtde
if tipo == 'S':
s += qtde
total = c + r + s
print('Total: {} cobaias'.format(total))
print('Total de coelhos: {}\nTotal de ratos: {}\nTotal de sapos: {}'.format(c, r, s))
print('Percentual de coelhos: {:.2f} %'.format(percentual(c, total)))
print('Percentual de ratos: {:.2f} %'.format(percentual(r, total)))
print('Percentual de sapos: {:.2f} %'.format(percentual(s, total)))
```
#### File: Uri-questions-solutions/python/2780.py
```python
def result(n):
if(n <= 800):
return 1
elif(n > 800 and n <= 1400):
return 2
else:
return 3
d = int(input())
print(result(d))
``` |
{
"source": "josevictorp81/Weather",
"score": 2
} |
#### File: weather/core/views.py
```python
from django.shortcuts import render
import requests
from django.conf import settings
def index(request):
if request.method == 'POST':
city = request.POST['city']
url = f'http://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&lang=pt_br&appid={settings.OPEN_WEATHER_MAP_API_KEY}'
result = requests.get(url=url).json()
#print(result)
weather = {
'city': city,
'temperature': result['main']['temp'],
'description': result['weather'][0]['description'],
'icon': result['weather'][0]['icon'],
'place': result['sys']['country'],
}
data = {
'weather': weather,
}
return render(request, 'index.html', data)
else:
return render(request, 'index.html')
``` |
{
"source": "josevini/editor-python",
"score": 3
} |
#### File: josevini/editor-python/modAccess.py
```python
import os
import os.path
from modInput import *
from modShow import *
def accessFile():
while True:
name, ext = os.path.splitext(getText('Quer ler qual arquivo: '))
filename = (name + ext) if ext else (name + '.txt')
try:
with open(filename, 'r', encoding='utf-8') as file:
line('-', 42)
delay('CONTEÚDO:', 0)
content = file.read()
show(changeColor(content if content else 'Vazio!', 'blue'))
break
except FileNotFoundError:
delay(changeColor('Arquivo não encontrado!', 'red'))
except UnicodeDecodeError:
delay(changeColor('Formato de arquivo não suportado!', 'red'))
def accessDir():
while True:
dirname = getText('Quer listar qual pasta: ')
try:
listingResult = os.listdir(dirname)
line('-', 42)
show('CONTEÚDO:')
if listingResult:
for dir in listingResult:
show(changeColor(dir, 'blue'))
else:
show(changeColor('Vazio!', 'blue'))
break
except FileNotFoundError:
delay(changeColor('Pasta não encontrada!', 'red'))
def menuAccess():
while True:
line('-', 42)
createMenu('arquivo', 'pasta', msg='Deseja acessar um arquivo ou pasta?')
line('-', 42)
op = interval(getNumber('Escolha uma opção: '), 0, 2)
if op == 0:
delay(changeColor('Cancelando...', 'red'))
break
elif op == 1:
accessFile()
elif op == 2:
accessDir()
```
#### File: josevini/editor-python/modShow.py
```python
import time
def createMenu(*ops, stop='cancelar', msg=''):
if msg:
print(msg)
for pos, op in enumerate(ops):
print(f'{pos + 1} - {op.capitalize() if op else "default"}')
print(f'0 - {stop.capitalize()}')
def show(msg=''):
print(msg)
def delay(msg='', seg=1.0):
print(msg)
time.sleep(seg)
def line(simb='', qtd=0):
print(simb * qtd)
def header(msg='', char='-', size=0):
line(char, size)
print(f'{msg.center(size)}')
line(char, size)
def changeColor(text='', color='black'):
colors = {
'black': '\033[30m',
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'purple': '\033[35m',
'gray': '\033[37m'
}
open = colors[color]
close = '\033[m'
return f'{open}{text}{close}'
def interval(value, min, max):
if min <= value <= max:
return value
print(changeColor(f'Digite um valor entre {min} e {max}', 'red'))
``` |
{
"source": "JoseVL92/sqlalchemy-connector",
"score": 3
} |
#### File: sqlalchemy-connector/sqlalchemy_multiconnector/sqlalchemy_multiconnector.py
```python
from datetime import datetime
from contextlib import contextmanager
from functools import wraps
from sqlalchemy import create_engine
from sqlalchemy.engine.row import Row
from sqlalchemy.exc import InvalidRequestError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, class_mapper, Session
BASE = declarative_base()
def decompose_fields(fields: list):
"""
Auxiliary func to check if 'fields' has a relationship expressed as <rel_name.rel_property>
:return Tuple (A, B) where A is the list of fields divided into possible relations and its subproperties,
and B is a boolean expressing if there is at least one relation in this fields
Ex: ([('name',), ('base')], False) ---> no relation
([('source', 'title'), ('name',)], True) ---> there is a relation with 'source'
"""
if not fields:
return [], False
# Check if there are any '*.*' pattern in any field,
# which indicates we need to retrieve some relationship property
splitted_fields = [f.split('.') for f in fields]
are_relations = [len(sf) > 1 for sf in splitted_fields]
return splitted_fields, any(are_relations)
def get_uris(db_type, db_host_or_path, db_port, db_name, db_user, db_passwd):
if not db_type or not db_host_or_path or not db_name:
raise ValueError("Not enough data")
if db_type == "sqlite":
# ensure that no trailing '/' is present
if db_host_or_path[-1] == '/':
db_host_or_path = db_host_or_path[:-1]
uri = f"sqlite:///{db_host_or_path}"
# return {"default": f"sqlite:///{db_host_or_path}"}
else:
uri = f"{db_type}://"
if db_user:
uri += db_user
if db_passwd:
uri += ":" + db_passwd
uri += "@"
uri += db_host_or_path
if db_port:
uri += ":" + str(db_port)
# if db_name is a list and db name 'default' is not specified,
# the default database would be the first in the db_name list
if isinstance(db_name, (list, tuple, set)) and len(db_name) > 0:
uri_dict = {name: uri + "/" + name for name in db_name}
if 'default' not in uri_dict:
uri_dict['default'] = uri + "/" + db_name[0]
elif isinstance(db_name, str):
uri_dict = {'default': uri + "/" + db_name}
else:
raise ValueError("db_name invalid value")
return uri_dict
def manage_session(function):
"""Ensure correct session management in transactions"""
@wraps(function)
def manager(*args, **kwargs):
if 'session' not in kwargs:
db_name = kwargs.get('db_name') or 'default'
schema_name = kwargs.get('schema_name')
with args[0].session_scope(engine_name=db_name, schema_name=schema_name) as session:
kwargs.update({"session": session})
return function(*args, **kwargs)
return function(*args, **kwargs)
return manager
def to_dict(obj, found=None, recursive=False):
if isinstance(obj, Row):
return obj._asdict()
if found is None:
found = set()
mapper = class_mapper(obj.__class__)
columns = [column.key for column in mapper.columns]
get_key_value = lambda c: (c, getattr(obj, c).isoformat()) if isinstance(getattr(obj, c), datetime) else (
c, getattr(obj, c))
out = dict(map(get_key_value, columns))
if recursive:
for name, relation in mapper.relationships.items():
if relation not in found:
found.add(relation)
related_obj = getattr(obj, name)
if related_obj is not None:
if relation.uselist:
out[name] = [to_dict(child, found, True) for child in related_obj]
else:
out[name] = to_dict(related_obj, found, True)
return out
class SQLConnector:
def __init__(self, db_type, db_host_or_path, db_name, db_port=None, db_schemas=None, db_user=None, db_passwd=None,
session_autoflush=True, session_autocommit=False):
"""
Creates an object with necessary parameters for connecting to a sql database
:param db_type: One of 'sqlite', 'postgresql' or 'mysql'
:param db_host_or_path: If db_type=='sqlite', it is the absolute path of the folder containing the file, otherwise it is a hostname or ip
:param db_name: If just one database will be used, it is a single db name (a file name if db_name='sqlite').
If multiple databases, it would be a list of db names or file names.
:param db_port: Port where db server is listening. None if db_type='sqlite'
:param db_schemas: List of schemas used on every specified database
:param db_user: Db server login user. None if db_type='sqlite'
:param db_passwd: Db server login password. None if db_type='sqlite'
"""
allowed_types = ("sqlite", "postgresql", "mysql")
if not db_name:
raise AttributeError("Must specify at least one db_name")
if db_type in allowed_types:
if db_type != 'sqlite' and not (db_name and db_user and db_passwd):
raise AttributeError(f"db_user and db_password must be declared for {db_type}")
self.connection_uris = get_uris(db_type, db_host_or_path, db_port, db_name, db_user, db_passwd)
else:
raise ValueError(f"{db_type} not in {str(allowed_types)}")
self.schemas = db_schemas if not db_type == 'sqlite' else None
if isinstance(self.schemas, str):
self.schemas = [self.schemas]
self.engines = {
name: create_engine(uri) for name, uri in self.connection_uris.items()
}
self.Session = sessionmaker(autoflush=session_autoflush, autocommit=session_autocommit)
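    # Illustrative usage (an assumption for documentation purposes, not part of the original module):
    # a single local SQLite database could be wired up as
    #     conn = SQLConnector(db_type="sqlite", db_host_or_path="/tmp/data", db_name="app.db")
    #     conn.create_tables()
    # while a PostgreSQL deployment would additionally pass db_port, db_user, db_passwd and,
    # optionally, db_schemas so that every listed schema receives the same tables.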
def create_tables(self, schemas: [] = None):
schemas = schemas or self.schemas
if isinstance(schemas, str):
schemas = [schemas]
self._create_schemas(schemas)
for _, engine in self.engines.items():
if schemas is not None:
                for sc in schemas:  # honour the schemas resolved above (argument or self.schemas)
BASE.metadata.create_all(
bind=engine.connect().execution_options(
schema_translate_map={None: sc}
)
)
else:
BASE.metadata.create_all(engine)
def _create_schemas(self, schemas: [] = None):
schemas = schemas or self.schemas
if schemas is None:
return
if isinstance(schemas, str):
schemas = [schemas]
for engine_name, _ in self.engines.items():
for sc in schemas:
self.execute_query("CREATE SCHEMA IF NOT EXISTS " + sc, engine_name)
def _dynamic_relations(self, resource_orm_class: BASE, rel_deep_list: list):
chained = getattr(resource_orm_class, rel_deep_list[0])
if len(rel_deep_list) > 1:
return self._dynamic_relations(chained, rel_deep_list[1:])
return chained
def execute_query(self, query: str, engine_name: str = None):
"""Execute a raw query on database 'engine_name'.
If any schema will be used, it must be specified in the sql statement"""
if engine_name is None:
engine_name = 'default'
engine = self.engines.get(engine_name)
if engine is None:
raise ValueError(f"No engine with name {engine_name}")
connection = engine.connect()
response = connection.execute(query)
returnable = False
        if hasattr(response, 'fetchall'):
            response = response.fetchall()
returnable = True
connection.close()
if returnable:
return response
@manage_session
def compose_filter_query(self,
resource_orm_class: BASE, resource_query_binding_class, filter_and_sort_dict: dict = None,
fields: list = None, limit: int = 1000, offset: int = 0, *, session: Session = None):
"""
Same as 'list_resources' but only returns the total count and query itself, not evaluated
:return: SQLAlchemy Query object
"""
_, are_relations = decompose_fields(fields)
if filter_and_sort_dict:
query = resource_query_binding_class(session=session).evaluate_params(filter_and_sort_dict)
else:
query = session.query(resource_orm_class)
if fields and not are_relations:
columns = [getattr(resource_orm_class, f) for f in fields]
query = query.with_entities(*columns)
total_count = 0
if limit or offset:
total_count = query.count()
# slice operation was kept with documentation purposes
if limit and offset:
end_index = offset + limit
query = query.slice(offset, end_index)
elif limit:
query = query.limit(limit)
elif offset:
query = query.offset(offset)
return total_count, query
@manage_session
def create_resource(self, resource_orm_class: BASE, resource_fields: dict, *, return_id: bool = False,
session: Session = None, **kwargs):
"""
Add a resource. Doesn't check for integrity errors. Valid for resources without foreign keys.
:param resource_orm_class: ORM class related to the resource
:param resource_fields: Dictionary with column names of the new object as keys and their respective values
:param return_id: If it needs to commit this query to catch the new autocreated 'id' and returning it
:param session: Session to be used to execute query
:param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
:return: True (or resource 'id' if return_id is True) if the operation succeeded
"""
resource = resource_orm_class(**resource_fields)
session.add(resource)
if return_id:
session.flush()
session.refresh(resource)
return resource.id
return True
@manage_session
def delete_resource(self, resource_orm_class: BASE, pk, *, session: Session = None, **kwargs):
"""
Deletes a resource
:param resource_orm_class: ORM class related to the resource
:param pk: Primary key value
:param session: Session to be used to execute query
:param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
:return: True if the operation succeeded
"""
resource = session.query(resource_orm_class).get(pk)
if resource is not None:
session.delete(resource)
return True
@manage_session
def get_resource(self, resource_orm_class: BASE, pk, pk_fieldname: str = None, fields: list = None, *,
just_check_existence: bool = False, session: Session = None, **kwargs):
"""
Get details about a specific resource.
:param resource_orm_class: ORM class related to the resource
:param pk: Primary key value
:param pk_fieldname: Primary key column name.
:param fields: Desired columns to be returned.
:param just_check_existence: If this method is invoked just to check resource existence
:param session: Session to be used to execute query
:param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
:return: A dictionary with the resource information
:raise: ValueError if no resource with 'pk' primary key value is found
"""
splitted_fields, are_relations = decompose_fields(fields)
if not pk_fieldname or are_relations:
resource = session.query(resource_orm_class).get(pk)
else:
# retrieving specific fields is a much more efficient way to query
fields = [getattr(resource_orm_class, f) for f in fields]
resource = session.query(*fields).filter(getattr(resource_orm_class, pk_fieldname) == pk).one_or_none()
if just_check_existence:
return resource is not None
if resource is None:
raise ValueError(f"Resource '{resource_orm_class.__tablename__}' with pk='{pk}' not found")
if fields:
return {'.'.join(sf): self._dynamic_relations(resource, sf) for sf in splitted_fields}
return to_dict(resource)
@manage_session
def list_resources(self, resource_orm_class: BASE, resource_query_binding_class, filter_and_sort_dict: dict = None,
fields: list = None, limit: int = 1000, offset: int = 0, *, session: Session = None, **kwargs):
"""
Get a list of resources that meet a set of parameters
:param resource_orm_class: ORM class related to the resource
:param resource_query_binding_class: QueryBinding-based class (from 'sqlalchemy-filterparams')
:param filter_and_sort_dict: Dictionary of options specified by 'filterparams' library
:param fields: Columns to be selected
:param limit: Max number of rows fetched
:param offset: Number of rows to skip before starting to return rows from the query
:param session: Session to be used to execute the query
:param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
:return: A dictionary with shape {"total": total_count, "resources": [resources_list]}
"""
if limit > 1000:
raise ValueError("Limit out of bounds")
total_count, query = self.compose_filter_query(resource_orm_class, resource_query_binding_class,
filter_and_sort_dict, fields, limit, offset, session=session)
        # if are_relations, the returned query simply ignores 'fields'
splitted_fields, are_relations = decompose_fields(fields)
resources_list = query.all()
if not total_count:
total_count = len(resources_list)
if fields:
response = [
{'.'.join(sf): self._dynamic_relations(resource, sf) for sf in splitted_fields} for resource in
resources_list
]
else:
response = [to_dict(rsc) for rsc in resources_list]
        # return the total count (computed before pagination) together with the list of resources
return {"total": total_count, "resources": response}
@manage_session
def update_resource(self, resource_orm_class: BASE, pk, updated_fields: dict, *, raise_if_bad_field: bool = False,
session: Session = None, **kwargs):
"""
Update a resource. Valid for resources without foreign keys
:param resource_orm_class: ORM class related to the resource
:param pk: Primary key of the existing resource
:param updated_fields: Dictionary with column names of the updated object as keys and their respective new values
:param raise_if_bad_field: True if you want to raise an exception when a non-existent field is specified for update
:param session: Session to be used to execute the query
:param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
:return: True if everything goes well
        :raise: ValueError if no record with the given primary key exists, or if 'raise_if_bad_field' is True and an unknown column is supplied
"""
resource = session.query(resource_orm_class).get(pk)
if resource is None:
raise ValueError(f"No record in table '{resource_orm_class.__tablename__}' with pk='{pk}'")
for field, new_value in updated_fields.items():
if not hasattr(resource, field):
if raise_if_bad_field:
raise ValueError(f"Table '{resource_orm_class.__tablename__}' has no '{field}' column")
# fails silently by default
continue
setattr(resource, field, new_value)
# nothing else is needed because the execution of session.commit() is made out of this method
return True
@contextmanager
def session_scope(self, engine_name: str = None, schema_name: str = None):
"""Provide a transactional scope around a series of operations."""
engine_name = engine_name or 'default'
engine = self.engines.get(engine_name)
if engine is None:
raise ValueError(f"No engine with name {engine_name}")
if schema_name:
connection = engine.connect().execution_options(
schema_translate_map={None: schema_name}
)
session = self.Session(bind=connection)
else:
session = self.Session(bind=engine)
try:
yield session
session.commit()
except InvalidRequestError:
session.rollback()
except Exception:
session.rollback()
raise
finally:
session.close()
def kill(self):
for engine in self.engines:
self.engines[engine].dispose()
``` |
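A minimal usage sketch for the database-manager class above. `DBManager` and the `User` ORM model are placeholder names (the real class and models are defined elsewhere in this repository); only the method names and signatures are taken from the code shown here.
```python
# Hedged sketch -- 'DBManager' and 'User' are hypothetical stand-ins for the
# manager class and an ORM model defined elsewhere in the repo.
manager = DBManager()            # construction arguments are not shown above
manager.create_tables()

# CRUD through the @manage_session wrappers
new_id = manager.create_resource(User, {'name': 'alice'}, return_id=True)
print(manager.get_resource(User, new_id, pk_fieldname='id', fields=['name']))
manager.update_resource(User, new_id, {'name': 'bob'})
manager.delete_resource(User, new_id)

# Several operations in one explicit transaction, optionally per schema
with manager.session_scope(schema_name='tenant_a') as session:
    session.add(User(name='carol'))
```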
{
"source": "Jose-V-Melo/Pizzaria_Luigi_API",
"score": 2
} |
#### File: Jose-V-Melo/Pizzaria_Luigi_API/main.py
```python
from typing import List
from fastapi import FastAPI, Depends, status
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
from src.infra.sqlalchemy.repositories.categoria import RepositoryCategoria
from src.infra.sqlalchemy.repositories.produto import RepositoryProduto
from src.infra.sqlalchemy.config.database import get_db, create_db
from src.schemas.usuario import Usuario, UsuarioSimples
from src.schemas.categoria import Categoria
from src.schemas.produto import Produto
from src.infra.sqlalchemy.repositories.usuario import RepositoryUsuario
app = FastAPI()
origins = [
"https://192.168.100.18:4000",
"https://pizzarialuigi.ddns.net",
"http://localhost:4000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
@app.post('/signup', status_code=status.HTTP_201_CREATED, response_model=UsuarioSimples)
def signup(usuario: Usuario, db: Session = Depends(get_db)):
    usuario_criado = RepositoryUsuario(db).create(usuario)
    return usuario_criado
@app.get('/usuarios', status_code=status.HTTP_200_OK, response_model=List[Usuario])
def listar_usuarios(db: Session = Depends(get_db)):
    usuarios = RepositoryUsuario(db).read()
    return usuarios
@app.put('/usuarios/{id_usuario}', status_code=status.HTTP_200_OK, response_model=Usuario)
def editar_usuario(id_usuario, usuario: Usuario, db: Session = Depends(get_db)):
RepositoryUsuario(db).update(id_usuario, usuario)
return usuario
@app.delete('/usuarios/{id_usuario}', status_code=status.HTTP_200_OK)
def excluir_usuario(id_usuario, db: Session = Depends(get_db)):
RepositoryUsuario(db).delete(id_usuario)
return {"mensagem": "usuário excluído com sucesso!"}
@app.post('/categorias', status_code=status.HTTP_201_CREATED, response_model=Categoria)
def criar_categoria(categoria: Categoria, session: Session = Depends(get_db)):
categoria_criada = RepositoryCategoria(session).create(categoria)
return categoria_criada
@app.get('/categorias', response_model=List[Categoria])
def listar_categorias(session: Session = Depends(get_db)):
categorias = RepositoryCategoria(session).list()
return categorias
@app.post('/produtos', status_code=status.HTTP_201_CREATED, response_model=Produto)
def criar_produto(produto: Produto, session: Session = Depends(get_db)):
produto_criado = RepositoryProduto(session).create(produto)
return produto_criado
@app.get('/produtos', response_model=List[Produto])
def listar_produtos(session: Session = Depends(get_db)):
produtos = RepositoryProduto(session).list()
return produtos
```
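A hedged sketch of exercising the endpoints above with FastAPI's test client. The payload keys mirror the attributes read by `RepositoryUsuario.create()` below; the actual Pydantic `Usuario` schema may differ, and a reachable database configured via `get_db` is assumed.
```python
# Sketch only: values are placeholders, and the project's database must be set up.
from fastapi.testclient import TestClient

from main import app

client = TestClient(app)

payload = {
    "nome": "Luigi",
    "senha": "not-a-real-password",
    "email": "luigi@example.com",
    "telefone": "11999999999",
    "foto": None,
}
response = client.post("/signup", json=payload)
assert response.status_code == 201

response = client.get("/produtos")
assert response.status_code == 200
print(response.json())
```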
#### File: sqlalchemy/repositories/usuario.py
```python
from datetime import datetime
from sqlalchemy import update, delete
from sqlalchemy.orm import Session
import src.schemas.usuario as schemas
import src.infra.sqlalchemy.models.usuario as models
class RepositoryUsuario():
def __init__(self, session: Session):
self.session = session
def create(self, usuario: schemas.Usuario):
db_usuario = models.Usuario(
nome=usuario.nome,
senha=<PASSWORD>,
email=usuario.email,
telefone=usuario.telefone,
foto=usuario.foto,
)
self.session.add(db_usuario)
self.session.commit()
self.session.refresh(db_usuario)
return db_usuario
def read(self):
usuarios = self.session.query(models.Usuario).all()
return usuarios
def update(self, id_usuario: int, usuario: schemas.Usuario):
stmt = (
update(models.Usuario).where(models.Usuario.id == id_usuario).
values(
nome=usuario.nome,
senha=<PASSWORD>,
email=usuario.email,
telefone=usuario.telefone,
foto=usuario.foto,
updated_at=datetime.now()
)
)
self.session.execute(stmt)
self.session.commit()
def delete(self, id: int):
stmt = delete(models.Usuario).where(models.Usuario.id == id)
self.session.execute(stmt)
self.session.commit()
``` |
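The repository can also be driven outside FastAPI. A sketch, assuming `get_db` is the usual generator-style dependency and that `schemas.Usuario` accepts these fields (both are inferences from the code above, not confirmed signatures):
```python
# Hedged sketch of direct repository use; field values are placeholders.
import src.schemas.usuario as schemas
from src.infra.sqlalchemy.config.database import get_db
from src.infra.sqlalchemy.repositories.usuario import RepositoryUsuario

session = next(get_db())  # assumes get_db() yields a Session, as FastAPI dependencies usually do
repo = RepositoryUsuario(session)

novo = repo.create(schemas.Usuario(nome="Mario", senha="not-a-real-password",
                                   email="mario@example.com",
                                   telefone="11988887777", foto=None))
print(novo.id)
print([u.nome for u in repo.read()])
```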
{
"source": "josevnz/EnableSysadminRssReader",
"score": 3
} |
#### File: EnableSysadminRssReader/tests/test_reader.py
```python
import unittest
from pathlib import Path
from enablesysadminrssreader import parse_rss, get_rss
TEST_DIR = Path(__file__).parent
class RssReaderTestCase(unittest.TestCase):
def setUp(self) -> None:
rss_file = TEST_DIR.joinpath('rss.xml')
with open(rss_file, 'r') as rss:
self.rss = rss.read()
def test_parse_rss(self):
parsed_rss = parse_rss(self.rss)
self.assertIsNotNone(parsed_rss)
for article in parsed_rss:
for key in ['title', 'link', 'description']:
self.assertIn(key, article)
self.assertIsNotNone(article[key])
def test_get_rss(self):
raw_xml = get_rss()
self.assertIsNotNone(raw_xml)
if __name__ == '__main__':
unittest.main()
``` |
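Grounded in the assertions above, the library's intended use looks roughly like this (network access is needed for `get_rss`):
```python
# Minimal usage sketch; the 'title'/'link'/'description' keys come from the test above.
from enablesysadminrssreader import get_rss, parse_rss

articles = parse_rss(get_rss())
for article in articles:
    print(article['title'])
    print(article['link'])
    print(article['description'])
    print('-' * 40)
```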
{
"source": "josevnz/ExtendingAnsibleWithPython",
"score": 3
} |
#### File: plugins/inventory/nmap_plugin.py
```python
import os.path
from subprocess import CalledProcessError
import os
import shlex
import shutil
import subprocess
from typing import List, Dict, Any
from xml.etree import ElementTree
# The imports below are the ones required for an Ansible plugin
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable
DOCUMENTATION = r'''
name: nmap_plugin
plugin_type: inventory
short_description: Returns a dynamic host inventory from Nmap scan
description: Returns a dynamic host inventory from Nmap scan, filter machines that can be accessed with SSH
options:
plugin:
description: Name of the plugin
required: true
choices: ['nmap_plugin']
address:
description: Address to scan, in Nmap supported format
required: true
'''
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'nmap_plugin'
def __init__(self):
super(InventoryModule, self).__init__()
self.address = None
self.plugin = None
def verify_file(self, path: str):
if super(InventoryModule, self).verify_file(path):
return path.endswith('yaml') or path.endswith('yml')
return False
def parse(self, inventory: Any, loader: Any, path: Any, cache: bool = True) -> Any:
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path) # This also loads the cache
try:
self.plugin = self.get_option('plugin')
self.address = self.get_option('address')
hosts_data = list(NmapRunner(self.address))
if not hosts_data:
raise AnsibleParserError("Unable to get data for Nmap scan!")
for host_data in hosts_data:
for name, address in host_data.items():
self.inventory.add_host(name)
self.inventory.set_variable(name, 'ip', address)
except KeyError as kerr:
raise AnsibleParserError(f'Missing required option on the configuration file: {path}', kerr)
except CalledProcessError as cpe:
raise AnsibleParserError("There was an error while calling Nmap", cpe)
class OutputParser:
def __init__(self, xml: str):
self.xml = xml
def get_addresses(self) -> List[Dict[str, str]]:
"""
Several things need to happen for an address to be included:
1. Host is up
2. Port is TCP 22
3. Port status is open
4. Uses IPv4
"""
addresses = []
root = ElementTree.fromstring(self.xml)
for host in root.findall('host'):
name = None
for hostnames in host.findall('hostnames'):
for hostname in hostnames:
name = hostname.attrib['name']
break
if not name:
continue
is_up = True
for status in host.findall('status'):
if status.attrib['state'] == 'down':
is_up = False
break
if not is_up:
continue
port_22_open = False
for ports in host.findall('ports'):
for port in ports.findall('port'):
if port.attrib['portid'] == '22':
for state in port.findall('state'):
if state.attrib['state'] == "open": # Up not the same as open, we want SSH access!
port_22_open = True
break
if not port_22_open:
continue
address = None
for address_data in host.findall('address'):
address = address_data.attrib['addr']
break
addresses.append({name: address})
return addresses
class NmapRunner:
def __init__(self, hosts: str):
self.nmap_report_file = None
found_nmap = shutil.which('nmap', mode=os.F_OK | os.X_OK)
if not found_nmap:
raise ValueError("Nmap binary is missing!")
self.nmap = found_nmap
self.hosts = hosts
def __iter__(self):
command = [self.nmap]
command.extend(__NMAP__FLAGS__)
command.append(self.hosts)
completed = subprocess.run(
command,
capture_output=True,
shell=False,
check=True
)
completed.check_returncode()
out_par = OutputParser(completed.stdout.decode('utf-8'))
self.addresses = out_par.get_addresses()
return self
def __next__(self):
try:
return self.addresses.pop()
except IndexError:
raise StopIteration
"""
Convert the args for proper usage on the Nmap CLI
Also, do not use the -n flag. We need to resolve IP addresses to hostname, even if we sacrifice a little bit of speed
"""
NMAP_DEFAULT_FLAGS = {
'-p22': 'Port 22 scanning',
'-T4': 'Aggressive timing template',
'-PE': 'Enable this echo request behavior. Good for internal networks',
'--disable-arp-ping': 'No ARP or ND Ping',
'--max-hostgroup 50': 'Hostgroup (batch of hosts scanned concurrently) size',
'--min-parallelism 50': 'Number of probes that may be outstanding for a host group',
'--osscan-limit': 'Limit OS detection to promising targets',
'--max-os-tries 1': 'Maximum number of OS detection tries against a target',
'-oX -': 'Send XML output to STDOUT, avoid creating a temp file'
}
__NMAP__FLAGS__ = shlex.split(" ".join(NMAP_DEFAULT_FLAGS.keys()))
``` |
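`OutputParser` can be exercised without Nmap or Ansible. A sketch with a hand-written document shaped like Nmap's `-oX` output, containing only the elements `get_addresses()` inspects (importing the plugin file as a plain module is an assumption for illustration):
```python
# Stand-alone sketch of the XML parsing logic above.
from nmap_plugin import OutputParser  # assumes the plugin file is importable as a module

SAMPLE_XML = """<?xml version="1.0"?>
<nmaprun>
  <host>
    <status state="up"/>
    <address addr="192.168.1.10" addrtype="ipv4"/>
    <hostnames>
      <hostname name="server1.home" type="PTR"/>
    </hostnames>
    <ports>
      <port protocol="tcp" portid="22">
        <state state="open"/>
      </port>
    </ports>
  </host>
</nmaprun>
"""

parser = OutputParser(SAMPLE_XML)
print(parser.get_addresses())  # [{'server1.home': '192.168.1.10'}]
```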
{
"source": "josevnz/kismet_home",
"score": 2
} |
#### File: kismet_home/test/test_integration_kismet.py
```python
import json
import os
import unittest
import uuid
from datetime import datetime
from pathlib import Path
from unittest import TestCase
from requests import HTTPError
from kismet_home import CONSOLE
from kismet_home.config import Reader
from kismet_home.kismet import KismetWorker, KismetAdmin, KismetResultsParser
BASEDIR = Path(__file__).parent
class TestKismetWorker(TestCase):
config_reader = Reader()
def test_check_session(self):
kw = KismetWorker(
api_key=TestKismetWorker.config_reader.get_api_key(),
url=TestKismetWorker.config_reader.get_url()
)
kw.check_session()
def test_check_system_status(self):
kw = KismetWorker(
api_key=TestKismetWorker.config_reader.get_api_key(),
url=TestKismetWorker.config_reader.get_url()
)
status = kw.check_system_status()
self.assertIsNotNone(status)
self.assertIn('kismet.system.memory.rss', status)
def test_get_alert_definitions(self):
kw = KismetWorker(
api_key=TestKismetWorker.config_reader.get_api_key(),
url=TestKismetWorker.config_reader.get_url()
)
        definitions = kw.get_alert_definitions()
        self.assertIsNotNone(definitions)
        self.assertIn('kismet.alert.definition.description', definitions[0])
def test_parse_alert_definitions(self):
with open(BASEDIR.joinpath('alert_definitions.json'), 'r') as json_file:
data = json.load(json_file)
alert_definitions = KismetResultsParser.parse_alert_definitions(
alert_definitions=data
)
self.assertIsNotNone(alert_definitions)
for definitions in alert_definitions:
self.assertIn('description', definitions)
self.assertIsNotNone(definitions['description'])
def test_process_alerts(self):
with open(BASEDIR.joinpath('alerts_example.json'), 'r') as json_file:
data = json.load(json_file)
alerts, severities, types = KismetResultsParser.process_alerts(
alerts=data
)
self.assertIsNotNone(alerts)
for alert in alerts:
self.assertIn('text', alert)
self.assertIsNotNone(alert['text'])
self.assertIsNotNone(severities)
for severity in severities:
self.assertIsNotNone(severities[severity])
self.assertIsNotNone(types)
for stype in types:
self.assertIsNotNone(types[stype])
def test_pretty_timestamp(self):
timestamps = {
1645833048.375856: datetime(2022, 2, 25, 18, 50, 48, 375856),
1645739791.814681: datetime(2022, 2, 24, 16, 56, 31, 814681)
}
for timestamp in timestamps:
dt = KismetResultsParser.pretty_timestamp(timestamp)
self.assertEqual(timestamps[timestamp], dt)
    @unittest.skip("requires an admin session API key")
def test_define_alert(self):
if 'ADMIN_SESSION_API' not in os.environ:
CONSOLE.log("'ADMIN_SESSION_API' environment variable not defined. Skipping this test")
return
ka = KismetAdmin(
api_key=os.environ['ADMIN_SESSION_API'],
url=TestKismetWorker.config_reader.get_url()
)
        name = str(uuid.uuid4())
ka.define_alert(
name=name,
description='Generic test alert'
)
def test_get_all_alerts(self):
if 'ADMIN_SESSION_API' not in os.environ:
CONSOLE.log("'ADMIN_SESSION_API' environment variable not defined. Skipping this test")
return
ka = KismetAdmin(
api_key=os.environ['ADMIN_SESSION_API'],
url=TestKismetWorker.config_reader.get_url()
)
# Send a bad alert first
try:
ka.raise_alert(
name='FAKE',
message="Fake alert for integration test!"
)
self.fail("Unknown alert type was supposed to happen!")
except HTTPError:
pass
ka.raise_alert(name='FORMATSTRING', message='Please ignore, this is a test alert')
kw = KismetWorker(
api_key=TestKismetWorker.config_reader.get_api_key(),
url=TestKismetWorker.config_reader.get_url()
)
all_alerts = kw.get_all_alerts()
self.assertIsNotNone(all_alerts)
found = False
for alert in all_alerts:
if alert['kismet.alert.text'] == 'Please ignore, this is a test alert':
found = True
break
self.assertTrue(found, "Could not find any custom alerts!")
``` |
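Outside the test suite, the same worker API reads naturally as a small monitoring script; everything below mirrors calls made in the tests above and needs a running Kismet server plus the project's config file:
```python
# Sketch based on the integration tests above; requires a live Kismet server.
from kismet_home.config import Reader
from kismet_home.kismet import KismetWorker

reader = Reader()
worker = KismetWorker(api_key=reader.get_api_key(), url=reader.get_url())
worker.check_session()  # raises if the API key or URL is wrong

status = worker.check_system_status()
print("Kismet RSS memory:", status['kismet.system.memory.rss'])

for alert in worker.get_all_alerts():
    print(alert['kismet.alert.text'])
```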
{
"source": "josevnz/rpm_query",
"score": 3
} |
#### File: rpm_query/bin/rpmq_dearpygui.py
```python
import argparse
import textwrap
from reporter import __is_valid_limit__
from reporter.rpm_query import QueryHelper
import dearpygui.dearpygui as dpg
TABLE_TAG = "query_table"
MAIN_WINDOW_TAG = "main_window"
def __cli_args__() -> argparse.Namespace:
"""
Command line argument parsing
:return:
"""
parser = argparse.ArgumentParser(description=textwrap.dedent(__doc__))
parser.add_argument(
"--limit",
type=__is_valid_limit__, # Custom limit validator
action="store",
default=QueryHelper.MAX_NUMBER_OF_RESULTS,
help="By default results are unlimited but you can cap the results"
)
parser.add_argument(
"--name",
type=str,
action="store",
default="",
help="You can filter by a package name."
)
parser.add_argument(
"--sort",
action="store_false",
help="Sorted results are enabled bu default, but you fan turn it off"
)
return parser.parse_args()
def __reset_form__():
dpg.set_value("package_name", args.name)
dpg.set_value("limit_text", args.limit)
dpg.set_value("sort_by_size", args.sort)
def __run_initial_query__(
*,
package: str,
limit: int,
sorted_elem: bool
) -> None:
"""
Need to ensure the table gets removed.
See issue: https://github.com/hoffstadt/DearPyGui/issues/1350
:return:
"""
if dpg.does_alias_exist(TABLE_TAG):
dpg.delete_item(TABLE_TAG, children_only=False)
if dpg.does_alias_exist(TABLE_TAG):
dpg.remove_alias(TABLE_TAG)
with dpg.table(header_row=True, resizable=True, tag=TABLE_TAG, parent=MAIN_WINDOW_TAG):
dpg.add_table_column(label="Name", parent=TABLE_TAG)
dpg.add_table_column(label="Size (bytes)", default_sort=True, parent=TABLE_TAG)
with QueryHelper(
name=package,
limit=limit,
sorted_val=sorted_elem
) as rpm_query:
for package in rpm_query:
with dpg.table_row(parent=TABLE_TAG):
dpg.add_text(f"{package['name']}-{package['version']}")
dpg.add_text(f"{package['size']:,.0f}")
def __run__query__() -> None:
__run_initial_query__(
package=dpg.get_value("package_name"),
limit=dpg.get_value("limit_text"),
sorted_elem=dpg.get_value("sort_by_size")
)
if __name__ == "__main__":
args = __cli_args__()
dpg.create_context()
with dpg.window(label="RPM Search results", tag=MAIN_WINDOW_TAG):
dpg.add_text("Run a new search")
dpg.add_input_text(label="Package name", tag="package_name", default_value=args.name)
with dpg.tooltip("package_name"):
dpg.add_text("Leave empty to search all packages")
dpg.add_checkbox(label="Sort by size", tag="sort_by_size", default_value=args.sort)
dpg.add_slider_int(
label="Limit",
default_value=args.limit,
tag="limit_text",
max_value=QueryHelper.MAX_NUMBER_OF_RESULTS
)
with dpg.tooltip("limit_text"):
dpg.add_text(f"Limit to {QueryHelper.MAX_NUMBER_OF_RESULTS} number of results")
with dpg.group(horizontal=True):
dpg.add_button(label="Search", tag="search", callback=__run__query__)
with dpg.tooltip("search"):
dpg.add_text("Click here to search RPM")
dpg.add_button(label="Reset", tag="reset", callback=__reset_form__)
with dpg.tooltip("reset"):
dpg.add_text("Reset search filters")
__run_initial_query__(
package=args.name,
limit=args.limit,
sorted_elem=args.sort
)
dpg.create_viewport(title='RPM Quick query tool')
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.start_dearpygui()
dpg.destroy_context()
``` |
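The same query can be run headless with `QueryHelper` directly; the constructor keywords and the `name`/`version`/`size` keys are the ones the GUI code above already uses:
```python
# Headless sketch of the query the GUI runs.
from reporter.rpm_query import QueryHelper

with QueryHelper(name="kernel", limit=5, sorted_val=True) as rpm_query:
    for package in rpm_query:
        print(f"{package['name']}-{package['version']}: {package['size']:,.0f} bytes")
```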
{
"source": "josewails/django-magicauth",
"score": 3
} |
#### File: django-magicauth/magicauth/utils.py
```python
import binascii
import os
from django import forms
from . import settings as magicauth_settings
def generate_token():
return binascii.hexlify(os.urandom(20)).decode()
def raise_error(email=None):
"""
Just raise an error - this can be used as a call back function
when no user was found in DB during the login process.
"""
raise forms.ValidationError(magicauth_settings.EMAIL_UNKNOWN_MESSAGE)
```
#### File: django-magicauth/tests/test_4_wait_view.py
```python
from pytest import mark
from django.shortcuts import reverse
from magicauth import settings
import urllib.parse
from tests import factories
'''
Step 4 of login process : see doc in magicauth/views.py for step details
Note : We do not test that the javascript actually does the redirect. We just test the django template's context.
'''
pytestmark = mark.django_db
def open_magic_link_with_wait(client, token, next=None):
url = reverse("magicauth-wait", kwargs={"key": token.key})
if next:
# Encode the url (with urllib.parse.quote) otherwise URL params get lost.
url += '?next=' + urllib.parse.quote(next)
return client.get(url)
def test_wait_page_loads(client):
url = reverse("magicauth-wait", kwargs={"key": 'some-token'})
response = client.get(url)
assert response.status_code == 200
def test_wait_page_will_redirect_to_validate_token(client):
token = factories.MagicTokenFactory()
response = open_magic_link_with_wait(client, token)
redirect_url = reverse('magicauth-validate-token', kwargs={"key": token.key})
assert redirect_url in response.context_data['next_step_url']
def test_wait_page_will_redirect_with_next_param(client):
token = factories.MagicTokenFactory()
response = open_magic_link_with_wait(client, token, '/test_dashboard/')
assert 'next=/test_dashboard/' in response.context_data['next_step_url']
def test_wait_page_will_redirect_with_default_next_param(client):
token = factories.MagicTokenFactory()
response = open_magic_link_with_wait(client, token)
assert 'next=/landing/' in response.context_data['next_step_url']
def test_wait_page_will_redirect_in_WAIT_SECONDS(client):
token = factories.MagicTokenFactory()
response = open_magic_link_with_wait(client, token)
assert response.context_data['WAIT_SECONDS'] == settings.WAIT_SECONDS
def test_wait_page_raises_404_if_unsafe_next_url(client):
token = factories.MagicTokenFactory()
response = open_magic_link_with_wait(
client, token, 'http://www.myfishingsite.com/')
assert response.status_code == 404
``` |
{
"source": "josewails/gql-query-builder",
"score": 2
} |
#### File: josewails/gql-query-builder/setup.py
```python
from setuptools import setup, Command, find_packages
from pip._internal.req import parse_requirements
import os
install_reqs = parse_requirements('requirements.txt', session='hack')
requires = [str(ir.req) for ir in install_reqs]
class PublishCommand(Command):
user_options: list = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -rf dist/')
os.system('python setup.py sdist')
os.system('twine upload dist/*')
setup(
name='gql-query-builder',
description='This is a GraphQL query builder.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
use_scm_version=True,
url='https://github.com/youyo/gql-query-builder',
author='youyo',
author_email='<EMAIL>',
install_requires=requires,
license="MIT License",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
keywords='graphql gql query-builder',
packages=['gql_query_builder'],
python_requires='>=3.6',
project_urls={
'Source': 'https://github.com/youyo/gql-query-builder',
},
cmdclass={'publish': PublishCommand},
)
``` |
{
"source": "josexmercado/2clear",
"score": 3
} |
#### File: 2clear/models/Rproducts.py
```python
from db import db
class Rproducts(db.Model):
__tablename__ = 'rentalproducts'
id = db.Column(db.Integer, primary_key=True)
rproductname = db.Column(db.String(45))
rprice = db.Column(db.String(30))
rquantity = db.Column(db.String(20))
def __init__(self, *args, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def json(self):
return {
'id': self.id,
'rproductname': self.rproductname,
'rprice': self.rprice,
'rquantity': self.rquantity,
}
def insert(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
```
#### File: 2clear/models/Sales.py
```python
from db import db
class Sales(db.Model):
__tablename__ = 'sales'
salesid = db.Column(db.Integer, primary_key=True)
customerid = db.Column(db.String(45))
customername = db.Column(db.String(45))
totalsale = db.Column(db.Integer)
recordedby = db.Column(db.String(45))
date = db.Column(db.String(50))
ordernumber = db.Column(db.String(50))
def __init__(self, *args, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def json(self):
return {
'salesid': self.salesid,
'ordernumber':self.ordernumber,
'customerid': self.customerid,
            'customername': self.customername,
'totalsale': self.totalsale,
'recordedby': self.recordedby,
'date': self.date
}
@staticmethod
def getBydate(_date):
return Sales.query.filter_by(date=_date).first()
def insert(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
```
#### File: 2clear/resources/Customer.py
```python
from flask_restful import Resource, reqparse
from models.CustomerModel import CustomerModel
from models.Orderlist import Orderlist
from models.Orders import Orders
from models.Products import Products
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import update
from db import db
class CustomerRegister(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('customername',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('customeraddress',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('customercontact',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('onhandid',
type=str,
required=True,
)
parser.add_argument('topay',
type=str,
required=True,
)
parser.add_argument('account',
type=str,
required=True,
)
data = parser.parse_args()
new_customer = CustomerModel(
name=data.customername,
address=data.customeraddress,
number=data.customercontact,
onhandid=data.onhandid,
topay=data.topay,
account=data.account
)
new_customer.insert()
return {'message':'Customer added!'}
class getcustomer(Resource):
def get(self, _name):
customername= CustomerModel.getByName(_name)
return customername.json()
class UpdateAccount(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('id',
type=str,
)
parser.add_argument('account',
type=int,
required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
customer = CustomerModel.getById(data.id)
updatex = CustomerModel.query.filter_by(id=customer.id).first()
updatex.account= updatex.account - data.account
updatex.commit()
return {'message':'Customer Updated!'}
class UpdateAccount2(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('id',
type=str,
)
parser.add_argument('account',
type=int,
required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
customer = CustomerModel.getById(data.id)
updatex = CustomerModel.query.filter_by(id=customer.id).first()
updatex.account= updatex.account + data.account
updatex.commit()
return {'message':'Customer Updated!'}
class UpdateCustomer(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('name',
type=str,
)
parser.add_argument('address',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('number',
type=str,
required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
customer = CustomerModel.getByName(data.name)
updatex = CustomerModel.query.filter_by(name=customer.name).first()
updatex.name= data.name
updatex.address= data.address
updatex.number= data.number
updatex.commit()
return {'message':'Customer Updated!'}
class deletecustomer(Resource):
def delete(self):
parser = reqparse.RequestParser()
parser.add_argument('name',
type=str,
)
data = parser.parse_args()
customer = CustomerModel.getByName(data.name)
delname = CustomerModel.query.filter_by(name = customer.name).first()
delname.delete()
delname.commit()
return {'message':'Product deleted!'}
class CustomerData(Resource):
def get(self, _id):
customer = CustomerModel.getById(_id)
return customer.json()
class customercontainer(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('orderid',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('type',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('quantity',
type=int,
# required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
Orderss = Orders.getById(data.orderid)
xupdatex = CustomerModel.query.filter_by(id=Orderss.customerid).first()
xupdatex.onhandid = CustomerModel.onhandid + data.quantity
xupdatex.commit()
return {'message':'wow!'}
class returncontainer(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('id',
type=int,
# required=True,
help="This field cannot be left blank!"
)
parser.add_argument('quantity',
type=int,
# required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
xupdatex = CustomerModel.query.filter_by(id=data.id).first()
xupdatex.onhandid = CustomerModel.onhandid - data.quantity
xupdatex.commit()
xupdatep = Products.query.filter_by(id="3").first()
xupdatep.quantity = Products.quantity + data.quantity
xupdatep.commit()
return {'message':'wow!'}
```
#### File: 2clear/resources/Rproducts.py
```python
from flask_restful import Resource, reqparse
from models.Rproducts import Rproducts
class Registerrentalproducts(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('rproductname',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('rprice',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('rquantity',
type=str,
required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
new_rproducts = Rproducts(
rproductname=data.rproductname,
rprice=data.rprice,
rquantity=data.rquantity
)
new_rproducts.insert()
return {'message':'Product Registered!'}
```
#### File: 2clear/resources/Sales.py
```python
from flask_restful import Resource, reqparse
from models.Orders import Orders
from models.Orderlist import Orderlist
from models.Sales import Sales
class recordsales(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('ordernumber',
type=str,
# required=True,
help="This field cannot be left blank!"
)
parser.add_argument('customerid',
type=str,
# required=True,
help="This field cannot be left blank!"
)
parser.add_argument('customername',
type=str,
# required=True,
help="This field cannot be left blank!"
)
parser.add_argument('totalsale',
type=str,
# required=True,
help="This field cannot be left blank!"
)
parser.add_argument('recordedby',
type=str,
required=True,
help="This field cannot be left blank!"
)
parser.add_argument('date',
type=str,
required=True,
help="This field cannot be left blank!"
)
data = parser.parse_args()
new_sales = Sales(
customerid=data.customerid,
ordernumber=data.ordernumber,
customername=data.customername,
totalsale=data.totalsale,
recordedby=data.recordedby,
date=data.date,
)
new_sales.insert()
return {'message':'New SAles!'}
``` |
{
"source": "josexy/proxyGet",
"score": 2
} |
#### File: proxyGet/src/proxySQL.py
```python
import os
import pymysql
import sqlite3
from warnings import filterwarnings
from server.collectProxyList import ProxyIpInfo
from proxyMessage import CliMessage,Color
class proxySQL(object):
def __init__(self):
super().__init__()
def connect_db(self):pass
def create_database(self,database_name):pass
def create_table(self,table_name):pass
def insert(self,*args):pass
def remove(self,*args):pass
def update(self,*args):pass
def execute_sql(self, sql,*args):pass
def close(self):pass
def is_closed(self):pass
def select_table(self,table_name=None):pass
def select_database(self,database_name=None):pass
def fetch_one_row(self):pass
class proxySQLite(proxySQL):
def __init__(self,database_path,table_name=None,table_field=None,overwrite=False):
super().__init__()
if database_path=='db/test.db':
if not os.path.exists('db'):
os.mkdir('db')
self.database_path=database_path
self.table_field=table_field
self.table_name=table_name
self.is_open=False
self.unique_export=False
self.overwrite=overwrite
self.connect_db()
def __del__(self):
if self.is_open:
self.close()
def connect_db(self):
if self.is_open:
return False
if self.overwrite:
if os.path.exists(self.database_path):
os.unlink(self.database_path)
try:
self.conn=sqlite3.connect(self.database_path)
except Exception as e:
CliMessage.print_with_status(e.args,Color.Red,Color.Bold,None,'failed')
CliMessage.print_with_status(f"Failed to connect SQLite database...",Color.Red,Color.Bold,None,'failed')
exit(-1)
self.is_open=True
self.cursor = self.conn.cursor()
def select_table(self,table_name=None):
if table_name:
self.table_name=table_name
def find_exist(self,proxy):
self.execute_sql(f"SELECT * FROM {self.table_name} WHERE proxies='{proxy}';")
if self.cursor.fetchone():
return True
else:
return False
def execute_sql(self,sql):
try:
self.cursor.execute(sql)
self.conn.commit()
except:
self.row_count = -1
self.conn.rollback()
return False
return True
def create_table(self,table_name=None):
# table_field => ['ID','proxies','location','log_time']
if table_name:
self.table_name=table_name
_sql=f"CREATE TABLE IF NOT EXISTS {self.table_name} (" \
f"ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,{' TEXT,'.join(self.table_field)} " \
f"TEXT," \
f"log_time TIMESTAMP default (datetime('now', 'localtime'))" \
f");"
return self.execute_sql(_sql)
def insert(self,values):
_fmt=','.join([f'\'{{{i}}}\'' for i in range(len(self.table_field))])
_sql=f"INSERT INTO {self.table_name}(proxies,location) VALUES ({_fmt});".format(*values)
return self.execute_sql(_sql)
def remove(self,prefix='proxies',value=None):
_sql=f"DELETE FROM {self.table_name} WHERE {prefix}=\'{value}\';"
return self.execute_sql(_sql)
def update(self,proxies=None,new_value=None):
_sql=f"UPDATE {self.table_name} SET location=\'{new_value}\' WHERE proxies=\'{proxies}\';"
return self.execute_sql(_sql)
def is_closed(self):
return not self.is_open
def close(self):
if not self.is_closed():
self.cursor.close()
self.conn.close()
self.is_open = False
def fetch_one_row(self):
sql=f'SELECT * FROM {self.table_name};'
if not self.execute_sql(sql):
return None
row = self.cursor.fetchone()
while row:
yield row
row=self.cursor.fetchone()
class proxyMySQL(proxySQL):
def __init__(self,user,pwd,table_field=None,
table_name=None,database_name=None,
host='localhost',port=3306,overwrite=False):
super().__init__()
filterwarnings('ignore',category=pymysql.Warning)
self.overwrite=overwrite
self.database_name=database_name
self.table_name=table_name
self.is_open=False
self.host=host
self.port=port
self.user=user
self.pwd=<PASSWORD>
self.table_field=table_field
self.unique_export=False
self.connect_db()
def __del__(self):
if self.is_open:
self.close()
def connect_db(self):
if self.is_open:
return False
try:
self.conn = pymysql.Connection(host=self.host,user=self.user, password=<PASSWORD>,port=self.port,charset='utf8')
except pymysql.MySQLError as e:
CliMessage.print_with_status(e.args,Color.Red,Color.Bold,None,'failed')
CliMessage.print_with_status(f"Failed to connect MySQL Server...",Color.Red,Color.Bold,None,'failed')
exit(-1)
self.is_open=True
self.cursor = self.conn.cursor()
return True
def execute_sql(self, sql,*args):
try:
self.cursor.execute(sql,*args)
self.conn.commit()
except:
self.conn.rollback()
return False
return True
def select_database(self,database_name=None):
if database_name:
self.database_name = database_name
self.conn.select_db(database_name)
def select_table(self,table_name=None):
if table_name:
self.table_name = table_name
def create_database(self,database_name=None):
if database_name:
self.database_name=database_name
if self.overwrite==True:
sql_drop_db=f"DROP DATABASE IF EXISTS {self.database_name};"
self.execute_sql(sql_drop_db)
sql=f"CREATE DATABASE IF NOT EXISTS {self.database_name};"
return self.execute_sql(sql)
def create_table(self,table_name=None):
self.conn.select_db(self.database_name)
if table_name:
self.table_name=table_name
if self.overwrite:
sql_drop_tb=f"DROP TABLE IF EXISTS {self.database_name}.{self.table_name}"
self.execute_sql(sql_drop_tb)
_tbs=' VARCHAR(50),'.join(self.table_field)
sql=f"""
CREATE TABLE IF NOT EXISTS {self.database_name}.{self.table_name}(
ID INT AUTO_INCREMENT NOT NULL,
{_tbs} VARCHAR(50),
log_time DATETIME DEFAULT NOW(),
CONSTRAINT {self.table_name}_PK PRIMARY KEY (ID)
)ENGINE=InnoDB
DEFAULT CHARSET=utf8mb4
COLLATE=utf8mb4_unicode_ci;"""
return self.execute_sql(sql)
def find_exist(self,proxy):
self.execute_sql(f"SELECT * FROM {self.database_name}.{self.table_name} WHERE proxies='{proxy}';")
if self.cursor.fetchone():
return True
else:
return False
def insert(self,values):
_field=','.join(self.table_field)
_fmt=','.join(['%s']*len(self.table_field))
sql=f"INSERT INTO {self.database_name}.{self.table_name}({_field}) VALUES({_fmt});"
return self.execute_sql(sql,tuple(values))
def remove(self,value=None):
_sql=f"DELETE FROM {self.database_name}.{self.table_name} WHERE proxies='{value}';"
return self.execute_sql(_sql)
def update(self,proxies=None,new_value=None):
_sql=f"UPDATE {self.database_name}.{self.table_name} SET location=\'{new_value}\' WHERE proxies=\'{proxies}\';"
return self.execute_sql(_sql)
def is_closed(self):
try:
self.conn.ping(False)
return False
except:
return True
def close(self):
if not self.is_closed():
self.cursor.close()
self.conn.close()
self.is_open=False
def fetch_one_row(self):
sql=f'SELECT * FROM {self.database_name}.{self.table_name};'
if not self.execute_sql(sql):
return None
row = self.cursor.fetchone()
while row:
yield row
row=self.cursor.fetchone()
```
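A usage sketch for the SQLite backend above. The `proxies`/`location` columns match what `insert()` and `update()` hard-code; the file name is only an example, and since the module also imports `server.collectProxyList` and `proxyMessage`, this only runs inside the project:
```python
# Sketch of the proxySQLite API shown above; 'example.db' is a throwaway file.
from proxySQL import proxySQLite  # assumes src/ is on the import path

db = proxySQLite('example.db',
                 table_name='proxy_list',
                 table_field=['proxies', 'location'],
                 overwrite=True)
db.create_table()
db.insert(['1.2.3.4:8080', 'US'])
print(db.find_exist('1.2.3.4:8080'))   # True
for row in db.fetch_one_row():
    print(row)                         # (ID, proxies, location, log_time)
db.close()
```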
#### File: src/server/__init__.py
```python
class Logo(object):
@staticmethod
def text():
logo = """
________ __
_____________ _______ ______.__./ _____/ _____/ |_
\____ \_ __ \/ _ \ \/ < | / \ ____/ __ \ __\
| |_> > | \( <_> > < \___ \ \_\ \ ___/| |
| __/|__| \____/__/\_ \/ ____|\______ /\___ >__|
|__| \/\/ \/ \/
"""
return logo
``` |
{
"source": "joseyose/assetLoader",
"score": 2
} |
#### File: assetloader/ui/window.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(404, 388)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.lbl_icon = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_icon.sizePolicy().hasHeightForWidth())
self.lbl_icon.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.lbl_icon.setFont(font)
self.lbl_icon.setText("")
self.lbl_icon.setPixmap(QtGui.QPixmap(":/icons/box"))
self.lbl_icon.setObjectName("lbl_icon")
self.horizontalLayout_3.addWidget(self.lbl_icon)
self.label_3 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout_3.addWidget(self.label_3)
self.gridLayout.addLayout(self.horizontalLayout_3, 0, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
self.label.setFont(font)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.path_line = QtWidgets.QLineEdit(Dialog)
self.path_line.setObjectName("path_line")
self.horizontalLayout.addWidget(self.path_line)
self.btn_browser = QtWidgets.QToolButton(Dialog)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
self.btn_browser.setFont(font)
self.btn_browser.setObjectName("btn_browser")
self.horizontalLayout.addWidget(self.btn_browser)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.list_widget = QtWidgets.QListWidget(Dialog)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
self.list_widget.setFont(font)
self.list_widget.setStyleSheet("QScrollBar::sub-page:vertical {\n"
"background: none;\n"
"}\n"
"\n"
"QScrollBar::add-page:vertical {\n"
"background: none;\n"
"}\n"
"QScrollBar:vertical {\n"
" border: 2px solid grey;\n"
" background: #B8C6D9;\n"
" width: 15px;\n"
" margin: 5px 0 5px 0;\n"
" }\n"
" QScrollBar::handle:vertical {\n"
" background: #F2F2F2;\n"
" min-height: 5px;\n"
" }\n"
" QScrollBar::add-line:vertical {\n"
" border: 2px solid grey;\n"
" background: #B8C6D9;\n"
" height: 5px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
" }\n"
"\n"
" QScrollBar::sub-line:vertical {\n"
" border: 2px solid grey;\n"
" background: #B8C6D9;\n"
" height: 5px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
" }\n"
"")
self.list_widget.setResizeMode(QtWidgets.QListView.Adjust)
self.list_widget.setViewMode(QtWidgets.QListView.ListMode)
self.list_widget.setObjectName("list_widget")
self.gridLayout.addWidget(self.list_widget, 2, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.btn_load = QtWidgets.QPushButton(Dialog)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
self.btn_load.setFont(font)
self.btn_load.setObjectName("btn_load")
self.horizontalLayout_2.addWidget(self.btn_load)
self.btn_close = QtWidgets.QPushButton(Dialog)
font = QtGui.QFont()
font.setFamily("JetBrains Mono")
self.btn_close.setFont(font)
self.btn_close.setObjectName("btn_close")
self.horizontalLayout_2.addWidget(self.btn_close)
self.gridLayout.addLayout(self.horizontalLayout_2, 3, 0, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Assets Loader - [BETA]"))
self.label_3.setText(_translate("Dialog", "Assets Loader"))
self.label.setText(_translate("Dialog", "Assets Directory"))
self.btn_browser.setText(_translate("Dialog", "..."))
self.btn_load.setText(_translate("Dialog", "Load"))
self.btn_close.setText(_translate("Dialog", "Close"))
from .resources import resources
``` |
{
"source": "joseyose/entrymaker",
"score": 2
} |
#### File: entrymaker/ui/widget.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1426, 614)
self.gridLayout_8 = QtWidgets.QGridLayout(Form)
self.gridLayout_8.setObjectName("gridLayout_8")
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setObjectName("gridLayout_7")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_description = QtWidgets.QLabel(Form)
self.label_description.setObjectName("label_description")
self.horizontalLayout.addWidget(self.label_description)
self.lineedit_description = QtWidgets.QLineEdit(Form)
self.lineedit_description.setObjectName("lineedit_description")
self.horizontalLayout.addWidget(self.lineedit_description)
self.gridLayout_7.addLayout(self.horizontalLayout, 1, 0, 1, 2)
self.button_export = QtWidgets.QPushButton(Form)
self.button_export.setObjectName("button_export")
self.gridLayout_7.addWidget(self.button_export, 3, 0, 1, 1)
self.splitter = QtWidgets.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label_edit = QtWidgets.QLabel(self.layoutWidget)
self.label_edit.setObjectName("label_edit")
self.verticalLayout.addWidget(self.label_edit)
self.textedit_edit = QtWidgets.QTextEdit(self.layoutWidget)
self.textedit_edit.setObjectName("textedit_edit")
self.verticalLayout.addWidget(self.textedit_edit)
self.layoutWidget1 = QtWidgets.QWidget(self.splitter)
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_preview = QtWidgets.QLabel(self.layoutWidget1)
self.label_preview.setObjectName("label_preview")
self.verticalLayout_2.addWidget(self.label_preview)
self.textedit_preview = QtWidgets.QTextEdit(self.layoutWidget1)
self.textedit_preview.setReadOnly(True)
self.textedit_preview.setObjectName("textedit_preview")
self.verticalLayout_2.addWidget(self.textedit_preview)
self.gridLayout_7.addWidget(self.splitter, 2, 0, 1, 2)
self.button_reset = QtWidgets.QPushButton(Form)
self.button_reset.setObjectName("button_reset")
self.gridLayout_7.addWidget(self.button_reset, 3, 1, 1, 1)
self.gridLayout_8.addLayout(self.gridLayout_7, 0, 1, 1, 1)
self.dock_properties = QtWidgets.QDockWidget(Form)
self.dock_properties.setMinimumSize(QtCore.QSize(420, 306))
self.dock_properties.setMaximumSize(QtCore.QSize(420, 524287))
self.dock_properties.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
self.dock_properties.setObjectName("dock_properties")
self.dockWidgetContents = QtWidgets.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout_6 = QtWidgets.QGridLayout(self.dockWidgetContents)
self.gridLayout_6.setObjectName("gridLayout_6")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label_note = QtWidgets.QLabel(self.dockWidgetContents)
self.label_note.setObjectName("label_note")
self.gridLayout.addWidget(self.label_note, 1, 0, 1, 1)
self.combobox_note = QtWidgets.QComboBox(self.dockWidgetContents)
self.combobox_note.setObjectName("combobox_note")
self.gridLayout.addWidget(self.combobox_note, 1, 1, 1, 1)
self.lineedit_source = QtWidgets.QLineEdit(self.dockWidgetContents)
self.lineedit_source.setMaximumSize(QtCore.QSize(400, 16777215))
self.lineedit_source.setObjectName("lineedit_source")
self.gridLayout.addWidget(self.lineedit_source, 0, 1, 1, 1)
self.label_source = QtWidgets.QLabel(self.dockWidgetContents)
self.label_source.setObjectName("label_source")
self.gridLayout.addWidget(self.label_source, 0, 0, 1, 1)
self.button_filedialog = QtWidgets.QToolButton(self.dockWidgetContents)
self.button_filedialog.setObjectName("button_filedialog")
self.gridLayout.addWidget(self.button_filedialog, 0, 2, 1, 1)
self.lineedit_tags = QtWidgets.QLineEdit(self.dockWidgetContents)
self.lineedit_tags.setObjectName("lineedit_tags")
self.gridLayout.addWidget(self.lineedit_tags, 2, 1, 1, 1)
self.label_tags = QtWidgets.QLabel(self.dockWidgetContents)
self.label_tags.setObjectName("label_tags")
self.gridLayout.addWidget(self.label_tags, 2, 0, 1, 1)
self.label_grokscore = QtWidgets.QLabel(self.dockWidgetContents)
self.label_grokscore.setObjectName("label_grokscore")
self.gridLayout.addWidget(self.label_grokscore, 3, 0, 1, 1)
self.slider_grokscore = QtWidgets.QSlider(self.dockWidgetContents)
self.slider_grokscore.setMaximum(4)
self.slider_grokscore.setProperty("value", 2)
self.slider_grokscore.setOrientation(QtCore.Qt.Horizontal)
self.slider_grokscore.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.slider_grokscore.setObjectName("slider_grokscore")
self.gridLayout.addWidget(self.slider_grokscore, 3, 1, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupbox_resources = QtWidgets.QGroupBox(self.dockWidgetContents)
self.groupbox_resources.setMinimumSize(QtCore.QSize(400, 0))
self.groupbox_resources.setMaximumSize(QtCore.QSize(400, 16777215))
self.groupbox_resources.setObjectName("groupbox_resources")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupbox_resources)
self.gridLayout_3.setObjectName("gridLayout_3")
self.scrollArea = QtWidgets.QScrollArea(self.groupbox_resources)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 374, 358))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_5 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_5.setObjectName("gridLayout_5")
self.grid_resources = QtWidgets.QGridLayout()
self.grid_resources.setObjectName("grid_resources")
self.label_resource1 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_resource1.setObjectName("label_resource1")
self.grid_resources.addWidget(self.label_resource1, 0, 0, 1, 1)
self.lineedit_resource1 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_resource1.setObjectName("lineedit_resource1")
self.grid_resources.addWidget(self.lineedit_resource1, 0, 1, 1, 1)
self.gridLayout_5.addLayout(self.grid_resources, 0, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem, 1, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_3.addWidget(self.scrollArea, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupbox_resources, 1, 0, 1, 7)
self.label_numofresources = QtWidgets.QLabel(self.dockWidgetContents)
self.label_numofresources.setObjectName("label_numofresources")
self.gridLayout_2.addWidget(self.label_numofresources, 0, 0, 1, 1)
self.button_add = QtWidgets.QToolButton(self.dockWidgetContents)
self.button_add.setObjectName("button_add")
self.gridLayout_2.addWidget(self.button_add, 0, 1, 1, 1)
self.button_remove = QtWidgets.QToolButton(self.dockWidgetContents)
self.button_remove.setObjectName("button_remove")
self.gridLayout_2.addWidget(self.button_remove, 0, 2, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_2)
self.gridLayout_6.addLayout(self.verticalLayout_3, 0, 0, 1, 1)
self.dock_properties.setWidget(self.dockWidgetContents)
self.gridLayout_8.addWidget(self.dock_properties, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
Form.setTabOrder(self.lineedit_source, self.button_filedialog)
Form.setTabOrder(self.button_filedialog, self.combobox_note)
Form.setTabOrder(self.combobox_note, self.lineedit_tags)
Form.setTabOrder(self.lineedit_tags, self.slider_grokscore)
Form.setTabOrder(self.slider_grokscore, self.button_add)
Form.setTabOrder(self.button_add, self.button_remove)
Form.setTabOrder(self.button_remove, self.scrollArea)
Form.setTabOrder(self.scrollArea, self.lineedit_resource1)
Form.setTabOrder(self.lineedit_resource1, self.lineedit_description)
Form.setTabOrder(self.lineedit_description, self.textedit_edit)
Form.setTabOrder(self.textedit_edit, self.textedit_preview)
Form.setTabOrder(self.textedit_preview, self.button_export)
Form.setTabOrder(self.button_export, self.button_reset)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label_description.setText(_translate("Form", "Description"))
self.lineedit_description.setPlaceholderText(_translate("Form", "Title description for the new entry. Keep under 50 characters"))
self.button_export.setText(_translate("Form", "Export"))
self.label_edit.setText(_translate("Form", "Edit"))
self.label_preview.setText(_translate("Form", "Preview"))
self.button_reset.setText(_translate("Form", "Reset"))
self.dock_properties.setWindowTitle(_translate("Form", "Properties"))
self.label_note.setText(_translate("Form", "Note"))
self.label_source.setText(_translate("Form", "Source"))
self.button_filedialog.setText(_translate("Form", "..."))
self.lineedit_tags.setPlaceholderText(_translate("Form", "Separated by commas"))
self.label_tags.setText(_translate("Form", "Tags"))
self.label_grokscore.setText(_translate("Form", "Grok Score"))
self.groupbox_resources.setTitle(_translate("Form", "Resources"))
self.label_resource1.setText(_translate("Form", "Resource #1"))
self.label_numofresources.setText(_translate("Form", "Number of Resources: 100"))
self.button_add.setText(_translate("Form", "..."))
self.button_remove.setText(_translate("Form", "..."))
```
#### File: src/entrymaker/views.py
```python
from ui.widget import Ui_Form
from PyQt5 import QtWidgets, QtGui # , QtCore, QtGui
from pathlib import Path
from exportmd import ExportMD
from sys import platform
WORDWRAP = 80
class Window(QtWidgets.QDialog, Ui_Form):
def __init__(self):
super(Window, self).__init__()
self._setupUI()
self.setWindowTitle("entry_maker::1.0")
self.configure_ui()
def enable_fields(self):
self.lineedit_description.setEnabled(True)
self.textedit_edit.setEnabled(True)
self.textedit_preview.setEnabled(True)
self.lineedit_tags.setEnabled(True)
self.button_add.setEnabled(True)
self.button_remove.setEnabled(True)
self.lineedit_resource1.setEnabled(True)
self.slider_grokscore.setEnabled(True)
self.button_reset.setEnabled(True)
def activate_export(self):
count = len(self.lineedit_description.text())
if count == 0:
self.button_export.setEnabled(False)
else:
self.button_export.setEnabled(True)
def _setupUI(self):
self.setupUi(self)
self.resources = [self.lineedit_resource1]
self.update_resourcescount()
def configure_ui(self):
# BUTTONS CLICKED
self.button_filedialog.clicked.connect(self.load_filedialog)
self.button_add.clicked.connect(self.add_resource)
self.button_remove.clicked.connect(self.remove_resource)
self.button_export.clicked.connect(self.export)
self.button_reset.clicked.connect(self.reset_ui)
        # Disable the input fields until a notes directory and note are selected
self.button_export.setEnabled(False)
self.button_reset.setEnabled(False)
self.combobox_note.setEnabled(False)
self.lineedit_description.setEnabled(False)
self.textedit_edit.setEnabled(False)
self.textedit_preview.setEnabled(False)
self.lineedit_tags.setEnabled(False)
self.lineedit_resource1.setEnabled(False)
self.button_add.setEnabled(False)
self.button_remove.setEnabled(False)
self.slider_grokscore.setEnabled(False)
self.lineedit_description.textChanged.connect(self.changecolorpastlimit)
self.textedit_edit.textChanged.connect(self.set_markdown)
# Setup line wraps for the preview
self.textedit_edit.clear()
self.textedit_edit.setLineWrapMode(
QtWidgets.QTextEdit.FixedColumnWidth)
self.textedit_edit.setWordWrapMode(QtGui.QTextOption.WordWrap)
self.textedit_edit.setLineWrapColumnOrWidth(WORDWRAP)
self.textedit_preview.clear()
self.textedit_preview.setLineWrapMode(
QtWidgets.QTextEdit.FixedColumnWidth)
self.textedit_preview.setWordWrapMode(QtGui.QTextOption.WordWrap)
self.textedit_preview.setLineWrapColumnOrWidth(WORDWRAP)
# self.textEdit_2.setTextColor(QtGui.QColor(255, 0, 0))
self.textedit_edit.setTabStopDistance(
QtGui.QFontMetricsF(
self.textedit_edit.font()).horizontalAdvance(' ') * 4)
self.textedit_preview.setTabStopDistance(
QtGui.QFontMetricsF(
self.textedit_preview.font()).horizontalAdvance(' ') * 4)
# self.plainTextEdit.setTabStopDistance(
# QtGui.QFontMetricsF(
# self.plainTextEdit.font()).horizontalAdvance(' ') * 4)
# This was in the constructor prior to this
self.textedit_preview.setReadOnly(True)
self.lineedit_description.textChanged.connect(self.activate_export)
self.combobox_note.activated.connect(self.enable_fields)
# check for existing config file
self.check_init_dir()
def update_init_dir(self):
newpath = self.lineedit_source.text()
dirfile = Path("./bin/init.txt")
with open(dirfile, "w") as f:
f.write(newpath)
def check_init_dir(self):
dirfile = Path("./bin/init.txt")
if dirfile.exists():
with open(dirfile, "r") as f:
notes_path = f.read().splitlines()[0]
self.lineedit_source.setText(notes_path)
self.populate_combobox()
else:
print("Init file does not exist")
def load_filedialog(self):
if self.lineedit_source.text():
initDir = self.lineedit_source.text()
else:
initDir = str(Path.home())
# Load file dialog
path = QtWidgets.QFileDialog.getExistingDirectory(
self, "Select Notes Directory", initDir)
if path:
self.lineedit_source.setText(path)
# Populate qcombobox
self.populate_combobox()
def populate_combobox(self):
# Clear it first
self.combobox_note.clear()
# Good god this is a much shorter version of this, but it's basically
# doing the same thing :)
# md = Path(self.lineedit_source.text()).rglob("*.md")
notes = [""]
for path in Path(self.lineedit_source.text()).rglob("*.md"):
# make sure to check which os I'm launching from
if platform == "win32":
path = str(path).split("\\")[-1].split(".")[0]
else:
path = str(path).split("/")[-1].split(".")[0]
notes.append(path)
# Only modify this if we actually find markdown files
if len(notes) >= 2:
notes.sort()
self.combobox_note.setEnabled(True)
self.combobox_note.addItems(notes)
def add_resource(self):
count = self.grid_resources.count()
if count >= 2:
edit = QtWidgets.QLineEdit()
label = QtWidgets.QLabel()
self.grid_resources.addWidget(label, count, 0)
self.grid_resources.addWidget(edit, count, 1)
self.resources.append(edit)
label.setText(f"Resource #{len(self.resources)}")
self.update_resourcescount()
def changecolorpastlimit(self):
count = len(self.lineedit_description.text())
if count > 50:
self.lineedit_description.setStyleSheet("color: #F2C12E;")
else:
self.lineedit_description.setStyleSheet("color: black;")
def set_markdown(self):
text = self.textedit_edit.toPlainText()
self.textedit_preview.setMarkdown(text)
def remove_resource(self):
count = self.grid_resources.count()
if count > 2:
label = self.grid_resources.itemAt(count - 2)
field = self.grid_resources.itemAt(count - 1)
self.grid_resources.removeWidget(label.widget())
self.grid_resources.removeWidget(field.widget())
self.resources.pop()
self.update_resourcescount()
def update_resourcescount(self):
if self.grid_resources.count() > 1:
update = f"Number of Resources: {len(self.resources)}"
self.label_numofresources.setText(update)
def export(self):
tags = self.lineedit_tags.text().split(",")
resources = [i.text() for i in self.resources]
data = {"source": self.lineedit_source.text(),
"note": self.combobox_note.currentText(),
"tags": tags,
"grok": self.slider_grokscore.value() + 1,
"resources": resources,
"title": self.lineedit_description.text(),
"contents": self.textedit_edit.toPlainText()}
ExportMD(data).export()
self.update_init_dir()
def reset_ui(self):
self.textedit_edit.clear()
self.textedit_preview.clear()
self.lineedit_description.clear()
self.lineedit_tags.clear()
# Deal with the resources
num = self.grid_resources.count()
while (num > 1):
self.remove_resource()
num -= 1
self.resources[0].clear()
``` |
{
"source": "JoseZalez/Houdini-scripts",
"score": 3
} |
#### File: JoseZalez/Houdini-scripts/Instance_percentage_based.py
```python
#Create a Controller to set a percentage in a copy to points for each geometry
#https://www.linkedin.com/in/jose-gonzalezvfx/
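#Builds the HScript switch expression: reads the cumulative percentage channels on the Controller null and maps each point's random 'range' attribute (0-100) to an input index with pulse() checks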
def createexpression(controller):
list=[]
for parm in controller.parms():
if parm.name().startswith("percen"):
path=parm.path()
val="ch('{}')".format(path)
list.append(val)
#Create the string for the expression with a initial data
expression="{\n"+ "float v=0;\n"
n=1
list_set=[]
setval=""
for path in list:
expression= expression +"float set"+str(n)+ " = " + path + ";\n"
setval=setval+"set"+str(n)+"+"
list_set.append(setval)
n+=1
expression= expression + "if(pulse(int(point(-1,0,'range',0)),0,set1)) {v=0;} \n"
#Add the conditionals for the percentages
for j in range(len(list_set)-1):
expression= expression + "if(pulse(int(point(-1,0,'range',0)),{},{}))".format(list_set[j][:-1],list_set[j+1][:-1])
expression=expression+ " {"+"v={};".format(j+1)+"}\n"
#Close the expression
expression = expression + "return v;\n}"
return expression
#Main
#Get a list of the selected node
nodes = hou.selectedNodes()
if not nodes:
hou.ui.displayMessage("Please select the geometry nodes to scatter", buttons=('OK',), severity=hou.severityType.Message, default_choice=0, close_choice=0, title="Select a node",details_expanded=False)
raise hou.Error("No geometry nodes selected")
geo=nodes[0].parent()
wrangle=geo.createNode("attribwrangle","Input_points")
controller=geo.createNode("null","Controller")
switch=geo.createNode("switch")
copytopts=geo.createNode("copytopoints")
block_begin_input=geo.createNode("block_begin")
compile_end=geo.createNode("compile_end")
block_end=geo.createNode("block_end")
#Set the block_end parms
block_begin_input.parm("method").set(1)
block_begin_input.parm("blockpath").set("../"+block_end.name())
block_end.parm("itermethod").set(1)
block_end.parm("method").set(1)
block_end.parm("useattrib").set(0)
block_end.parm("blockpath").set("../"+block_begin_input.name())
block_end.parm("templatepath").set("../"+block_begin_input.name())
block_end.parm("multithread").set(1)
#Add the rand attribute to the wrangle
wrangle.parm("snippet").set("f@range=rand(@ptnum)*100;")
#Add the percentages controls to the null node
ptg = controller.parmTemplateGroup()
parm_folder = hou.FolderParmTemplate('folder', 'Controls')
i=1
default=100/len(nodes)
for node in nodes:
parmtemplate=hou.IntParmTemplate('percentage_'+str(i), 'Geo '+str(i)+' %', 1,default_value=(default,0),min =0,max=100)
parm_folder.addParmTemplate(parmtemplate)
i+=1
ptg.append(parm_folder)
controller.setParmTemplateGroup(ptg)
#Create the compile and block begins and connect them to the switch
compile_begin_first=geo.createNode("compile_begin")
compile_begin_first.parm("blockpath").set("../"+compile_end.name())
for node in nodes:
compile_begin=geo.createNode("compile_begin")
compile_begin.parm("blockpath").set("../"+compile_end.name())
compile_begin.setInput(0,node)
block_begin_loop=geo.createNode("block_begin")
block_begin_loop.parm("method").set(3)
block_begin_loop.parm("blockpath").set("../"+block_end.name())
block_begin_loop.setInput(0,compile_begin)
switch.setNextInput(block_begin_loop)
#Connect the inputs from the nodes to create the network
compile_begin_first.setInput(0,wrangle)
block_begin_input.setInput(0,compile_begin_first)
copytopts.setInput(0,switch)
copytopts.setInput(1,block_begin_input)
block_end.setInput(0,copytopts)
compile_end.setInput(0,block_end)
switch.moveToGoodPosition()
copytopts.moveToGoodPosition()
pos=switch.position()
v1=hou.Vector2((2.0,0.0))
v2=hou.Vector2((4.0,0.0))
pos1 = pos + v1
pos2 = pos + v2
controller.setPosition(pos1)
wrangle.setPosition(pos2)
#Set the expression on the switch parm
switch.parm("input").setExpression(createexpression(controller))
ptg = switch.parmTemplateGroup()
parm_folder = hou.FolderParmTemplate('folder', 'Spare Input')
parmtemplate=hou.StringParmTemplate ('spare_input0','Spare Input 0', 1 ,naming_scheme=hou.parmNamingScheme.Base1, string_type=hou.stringParmType.FileReference, tags={ "opfilter" : "!!SOP!!", "oprelative" : ".", })
parm_folder.addParmTemplate(parmtemplate)
ptg.append(parm_folder)
switch.setParmTemplateGroup(ptg)
switch.parm("spare_input0").set("../"+block_begin_input.name())
``` |
{
"source": "josezambrana/covid19_bo",
"score": 3
} |
#### File: josezambrana/covid19_bo/extract.py
```python
import requests
import datetime
import numpy as np
import pandas as pd
today_dt = datetime.date.today()
today = today_dt.strftime('%Y-%m-%d')
OURWORLDINDATA = "https://covid.ourworldindata.org/data/owid-covid-data.csv"
REPO_DATA = (
'https://raw.githubusercontent.com/'
'mauforonda/covid19-bolivia/master/data.json'
)
def fetch_from_ourworldindata(iso_code=None):
response = requests.get(OURWORLDINDATA)
with open('ourworldindata.csv', 'w+') as f:
f.write(response.text)
with open(f'data/ourworldindata_{today}.csv', 'w+') as f:
f.write(response.text)
df = pd.read_csv('ourworldindata.csv')
return df
def fetch_from_covid19_bolivia_repo():
response = requests.get(REPO_DATA)
data = response.json()
rows = []
for item in data['confirmados']:
row = {'fecha': item['fecha']}
row.update(item['dep'])
rows.append(row)
cities = [
'la_paz', 'cochabamba', 'santa_cruz', 'oruro', 'potosí', 'tarija',
'chuquisaca', 'beni', 'pando'
]
df = pd.DataFrame(rows)
df['total'] = df[cities].sum(axis=1)
filtered = df[(['fecha', 'total'] + cities)]
filtered.columns = ['ds', 'y'] + cities
return filtered
def get_data(source='ourworldindata'):
if source == 'ourworldindata':
filtered = fetch_from_ourworldindata()
elif source == 'github':
return fetch_from_covid19_bolivia_repo()
elif source == 'boliviasegura':
url = 'https://boliviasegura.agetic.gob.bo/wp-content/json/api.php'
filtered = requests.get(url).json()
filtered.to_csv(f'data/{source}.csv', index=False)
filtered.to_csv(f'data/{source}_{today}.csv', index=False)
filtered.sort_values(by='ds', inplace=True)
return filtered
def get_population():
population = pd.read_csv('data/population.csv')
population = population[population['Year'] == '2019'].sort_values(['Population'], ascending=False)
population = population[pd.notna(population['Code'])]
return population
def get_full_data(source='ourworldindata', force=False):
if force:
df = fetch_from_ourworldindata()
else:
df = pd.read_csv(f'{source}.csv')
df = df[df['iso_code'] != 'OWID_WRL']
df = df[pd.notnull(df['iso_code'])]
population = get_population()
df = df.set_index('iso_code').join(population.set_index('Code')).reset_index()
df.rename(columns={'index': 'iso_code'}, inplace=True)
return df
``` |
{
"source": "JoseZamora97/gentool3dmaterials",
"score": 3
} |
#### File: gentool3dmaterials/gentool/basics.py
```python
from typing import List
class Environment:
def __init__(self, dimension: int = 0):
self.dimension = dimension
class Material:
class Kind:
STATIC_TEXTURE_AND_PARAMS = "static_static"
STATIC_TEXTURE_DYNAMIC_PARAMS = "static_dynamic"
DYNAMIC_TEXTURE_AND_PARAMS = "dynamic_dynamic"
class Texture:
RANDOM = "random"
GOLD = "gold"
MARBLE = "marble"
CRYSTAL = "crystal"
WOOD = "wood"
def __init__(self,
kind: str = "",
texture: str = "",
metallic: float = .0,
specular: float = .0,
roughness: float = .0
):
self.kind = kind
self.metallic = metallic
self.specular = specular
self.roughness = roughness
self.texture = texture
def static_texture_and_params(self,
texture: str = "",
metallic: float = .0,
specular: float = .0,
roughness: float = .0):
self.kind = Material.Kind.STATIC_TEXTURE_AND_PARAMS
self.texture = texture
self.metallic = metallic
self.specular = specular
self.roughness = roughness
return self
def static_texture_dynamic_params(self, texture):
self.kind = Material.Kind.STATIC_TEXTURE_DYNAMIC_PARAMS
self.texture = texture
return self
def dynamic_texture_and_params(self):
self.kind = Material.Kind.DYNAMIC_TEXTURE_AND_PARAMS
self.texture = Material.Texture.RANDOM
return self
class Object:
def __init__(self,
name: str,
path: str,
material: Material,
normalize: bool = True):
self.name = name
self.path = path
self.normalize = normalize
self.material = material
class Light:
class Kind:
STATIC_LIGHT = "static" # no se mueve, tiene un color fijo
DYNAMIC_LIGHT = "dynamic" # se mueve, tiene un color fijo
RAINBOW_STATIC_LIGHT = "rainbow_static_light" # no se mueve, cambia de color
RAINBOW_DYNAMIC_LIGHT = "rainbow_dynamic_light" # se mueve, cambia de color
def __init__(self,
kind: str = "",
color: list = None,
location: list = None,
max_range: int = 0,
max_energy: int = 10
):
self.kind = kind
self.color = color
self.location = location
self.max_range = max_range
self.max_energy = max_energy
def dynamic_light(self, color: list, max_range: int):
assert len(color) == 3, "color must have length of 3"
assert 0.0 <= color[0] <= 1.0, "r-channel must be between 0 and 1"
assert 0.0 <= color[1] <= 1.0, "g-channel must be between 0 and 1"
assert 0.0 <= color[2] <= 1.0, "b-channel must be between 0 and 1"
self.color = color
self.kind = self.Kind.DYNAMIC_LIGHT
self.max_range = max_range
return self
def static_light(self, color: list, location: list):
assert len(color) == 3, "color must have length of 3"
assert 0.0 <= color[0] <= 1.0, "r-channel must be between 0 and 1"
assert 0.0 <= color[1] <= 1.0, "g-channel must be between 0 and 1"
assert 0.0 <= color[2] <= 1.0, "b-channel must be between 0 and 1"
assert len(location) == 3, "location must have length of 3"
self.color = color
self.location = location
self.kind = self.Kind.STATIC_LIGHT
return self
def rainbow_static_light(self, location: list, max_energy: int):
assert len(location) == 3, "location must have length of 3"
self.kind = self.Kind.RAINBOW_STATIC_LIGHT
self.location = location
self.max_energy = max_energy
return self
def rainbow_dynamic_light(self, max_range: float, max_energy: int):
self.kind = self.Kind.RAINBOW_DYNAMIC_LIGHT
self.max_range = max_range
self.max_energy = max_energy
return self
class Viewpoint:
class Kind:
STATIC_CAMERA = "static_camera"
DYNAMIC_CAMERA = "dynamic_camera"
OBJECT_PATH = "object_path"
def __init__(self, kind: str = "",
location: list = None,
amount: int = 0,
size: int = 0,
horizontal_divisions: int = 0,
vertical_divisions: int = 0,
max_range: int = 0
):
self.max_range = max_range
self.location = location
self.kind = kind
self.amount = amount
self.size = size
self.vertical_divisions = vertical_divisions
self.horizontal_divisions = horizontal_divisions
def static_camera_viewpoint(self,
location: List,
amount: int):
self.kind = self.Kind.STATIC_CAMERA
self.location = location
self.amount = amount
return self
def dynamic_camera_viewpoint(self, amount: int, max_range: int):
self.kind = self.Kind.DYNAMIC_CAMERA
self.amount = amount
self.max_range = max_range
return self
def espheric_path_viewpoint(self,
size: int,
horizontal_divisions: int,
vertical_divisions: int):
self.kind = self.Kind.OBJECT_PATH
self.size = size
self.horizontal_divisions = horizontal_divisions
self.vertical_divisions = vertical_divisions
return self
class Render:
class Style:
NORMAL = "normal"
SILHOUETTE = "silhouette"
TEXTURE_SEGMENTATION = "texture-segmentation"
RAY_TRACED = "ray-traced"
RASTERED = "rastered"
def __init__(self, resolution_x: int, resolution_y: int, output_dir_path: str, styles: List[str]):
self.resolution_x = resolution_x
self.resolution_y = resolution_y
self.output_dir_path = output_dir_path
self.styles = styles
```
#### File: JoseZamora97/gentool3dmaterials/operators.py
```python
from bpy.types import Operator
from .gentool.basics import Environment, Object, Material, Viewpoint, Render, Light
from .gentool.translator import ConfigIO, DatasetsGenerator, Config
from .gentool.utils import (Cleaner, DataGenApplyFuncts, Message)
class OperatorsEnd:
FINISHED = "FINISHED"
RUNNING_MODAL = "RUNNING_MODAL"
CANCELLED = "CANCELLED"
PASS_THROUGH = "PASS_THROUGH"
INTERFACE = "INTERFACE"
def create_config_from_gui(properties):
e = Environment(dimension=properties.scene_dimension)
m = Material(
kind=properties.material_kind,
texture=properties.choice_material
# metallic=properties.metallic,
# specular=properties.specular,
# roughness=properties.roughness
)
o = Object(
name='sample',
path=properties.input_model,
material=m,
normalize=properties.normalize
)
v = Viewpoint(
kind=properties.camera_kind,
location=properties.camera_location,
amount=properties.amount_shoots,
size=properties.camera_size,
horizontal_divisions=properties.camera_h_segments,
vertical_divisions=properties.camera_v_segments,
max_range=properties.camera_range_location
)
i = Light(
# kind=properties.light_kind,
# color=properties.light_color,
# location=properties.light_location,
# max_range=properties.light_range_location,
# max_energy=properties.max_energy
)
styles = []
if properties.style_normal:
styles.append(Render.Style.NORMAL)
if properties.style_silhouette:
styles.append(Render.Style.SILHOUETTE)
if properties.style_silhouette_colored:
styles.append(Render.Style.TEXTURE_SEGMENTATION)
if properties.style_ray_traced:
styles.append(Render.Style.RAY_TRACED)
if properties.style_rastered:
styles.append(Render.Style.RASTERED)
r = Render(
resolution_x=properties.render_resolution_x,
resolution_y=properties.render_resolution_y,
output_dir_path=properties.render_output_folder_path,
styles=styles
)
return Config(environment=e, render=r, objects=[o], lights=[i], viewpoints=[v])
def generate_renders(config: Config, preview: bool):
dataset_generator = DatasetsGenerator(
config=config,
functs=DataGenApplyFuncts(),
preview=preview
)
dataset_generator.setName('Dataset-Generator')
dataset_generator.run()
class OP_OT_GenerateScene(Operator):
"""
This class shows to the user the created objects where all generation
will proceed.
"""
bl_label = "Render a Sample"
bl_idname = "object.generate_scene"
def execute(self, context):
try:
config = create_config_from_gui(context.scene.tool)
generate_renders(config, preview=True)
Message.show(
title="Information",
message="Rendering the preview",
icon='INFO'
)
except Exception as e:
Cleaner.clear_scene()
Message.show(
title="Operation Canceled",
message=str(e),
icon='ERROR'
)
return {OperatorsEnd.CANCELLED}
return {OperatorsEnd.FINISHED}
class OP_OT_ClearScene(Operator):
"""
Clear the scene.
"""
bl_label = "Clear Scene"
bl_idname = "object.clear_scene"
def execute(self, _):
Cleaner.clear_scene()
return {OperatorsEnd.FINISHED}
class OP_OT_GenerateDataset(Operator):
bl_label = "Generate"
bl_idname = "object.generate_dataset"
bl_options = {'REGISTER'}
def execute(self, context):
tool = context.scene.tool
input_path = tool.input_presets_file
config = ConfigIO.json_loads(input_path) if tool.choice_render == 'FILE' \
else create_config_from_gui(tool)
generate_renders(config, preview=False)
return {OperatorsEnd.FINISHED}
```
#### File: JoseZamora97/gentool3dmaterials/panels.py
```python
from bpy.types import Panel
from .operators import OP_OT_ClearScene, OP_OT_GenerateDataset, OP_OT_GenerateScene
class ToolPanel:
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "GenTool3D"
class PL_PT_root(ToolPanel, Panel):
"""
Root holder of the Addon GUI.
"""
bl_label = "3D Multiview RenderManager Generator"
bl_idname = "PL_PT_root"
@classmethod
def poll(cls, _):
return True
def draw(self, _):
pass
# self.layout.label(text="Welcome to 3D Multiview RenderManager Generator")
class PL_PT_gui(ToolPanel, Panel):
"""
GUI Configurator holder.
"""
bl_label = "GUI Configurator (vBeta)"
bl_idname = "PL_PT_gui"
bl_parent_id = "PL_PT_root"
def draw(self, context):
layout = self.layout
tool = context.scene.tool
# Scene Options
layout.separator()
layout.label(text="Scene option:")
row = layout.row()
row.prop(tool, "scene_dimension")
# Model Options
layout.separator()
layout.label(text="Model options:")
layout.prop(tool, "input_model")
layout.prop(tool, "normalize")
# Materials options
layout.separator()
layout.label(text="Material options")
# row = layout.row()
# row.enabled = False
# row.prop(tool, 'material_kind')
layout.prop(tool, 'choice_material')
# layout.label(text="Material property:")
# row = layout.row()
# row.enabled = False
# row.prop(tool, 'metallic')
# row.prop(tool, 'specular')
# row.prop(tool, 'roughness')
# Camera options
layout.separator()
layout.label(text="Camera options")
row = layout.row()
row.prop(tool, 'camera_kind')
row.prop(tool, 'amount_shoots')
layout.prop(tool, 'camera_location')
layout.prop(tool, 'camera_range_location')
row = layout.row()
row.prop(tool, 'camera_size')
row.prop(tool, 'camera_h_segments')
row.prop(tool, 'camera_v_segments')
# Render Manager options
layout.separator()
layout.label(text="Render options:")
row = layout.row()
row.prop(tool, 'style_normal')
row.prop(tool, 'style_silhouette')
row = layout.row()
row.prop(tool, 'style_silhouette_colored')
row.prop(tool, 'style_ray_traced')
row = layout.row()
row.prop(tool, 'style_rastered')
layout.prop(tool, 'render_resolution_x')
layout.prop(tool, 'render_resolution_y')
layout.prop(tool, 'render_output_folder_path')
layout.separator()
preview_row = layout.row()
# preview_row.operator(OP_OT_GenerateScene.bl_idname)
preview_row.operator(OP_OT_ClearScene.bl_idname)
class PL_PT_file(ToolPanel, Panel):
"""
Generator settings holder.
"""
bl_label = "Import .json config"
bl_idname = "PL_PT_file"
bl_parent_id = "PL_PT_root"
def draw(self, context):
layout = self.layout
tool = context.scene.tool
layout.prop(tool, "input_presets_file")
class PL_PT_generator(ToolPanel, Panel):
"""
Generator settings holder.
"""
bl_label = "Generator"
bl_idname = "PL_PT_generator"
bl_parent_id = "PL_PT_root"
def draw(self, context):
layout = self.layout
tool = context.scene.tool
layout.prop(tool, "choice_render")
layout.separator()
layout.operator(OP_OT_GenerateDataset.bl_idname)
``` |
{
"source": "JoseZancanaro/Neurovisual-controller-celeste",
"score": 3
} |
#### File: project/src/digital_image_processing.py
```python
import cv2 as cv
import numpy as np
import time as timestamp
import os
import matplotlib.pyplot as plt
from tracker import EuclideanDistTracker
from window_capture import WindowCapture
from skimage.registration import phase_cross_correlation
from skimage.registration._phase_cross_correlation import _upsampled_dft
from scipy.ndimage import fourier_shift
def shrinking(image, scale=3):
width = int(image.shape[1] / scale)
height = int(image.shape[0] / scale)
dimension = (width, height)
# Resize image: Enlarging (INTER_LINEAR or INTER_CUBIC), shrinking (INTER_AREA)
return cv.resize(image, dimension, interpolation=cv.INTER_AREA)
def pre_processing(image):
# image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
image = cv.medianBlur(image, 3)
return image
# https://docs.opencv.org/4.5.2/d1/d5c/tutorial_py_kmeans_opencv.html
# https://docs.opencv.org/4.5.2/d5/d38/group__core__cluster.html
def k_means_color_quantization(image, k=3):
# Reshape the image to a 2D array of pixels and 3 color values (RGB)
pixel_values = image.reshape((-1, 3))
# Convert to float
pixel_values = np.float32(pixel_values)
# Criteria = (type, max_iteration, epsilon)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 1.0)
compactness, labels, centers = cv.kmeans(pixel_values, k, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
# print("Compactness: ", compactness)
# print("\nLabels: ", labels)
# print("\nCenters: ", centers)
# Convert back to 8 bit values
center = np.uint8(centers)
# Flatten the labels array
label = labels.flatten()
segmented_image = center[label.flatten()]
# Reshape back to the original image dimension
segmented_image = segmented_image.reshape(image.shape)
return segmented_image
# https://docs.opencv.org/3.4.15/d0/d0a/classcv_1_1Tracker.html
# https://learnopencv.com/object-tracking-using-opencv-cpp-python/
def tracking_points(frame, tracker):
# Update tracker
success, bbox = tracker.update(frame)
p1 = (int(bbox[0]), int(bbox[1]))
p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
points = [p1, p2]
return [success, points]
def frames_from_window(window_name, output_path, runtime=10):
# Initialize the WindowCapture class
win_cap = WindowCapture(window_name)
# Runtime control variables
loop_time = timestamp.time()
loop_end = loop_time + runtime
count = 1
# Tracking variables
tracker = cv.TrackerCSRT_create()
bbox = (5, 110, 20, 40) # native(320x180) - roi(5, 110, 20, 40) - another(5, 60, 30, 100)
first_frame = None
is_first_frame = True
while timestamp.time() < loop_end:
# Get an updated image of the window
screenshot = win_cap.get_screenshot()
# Reduces the captured image, pre-processing and k-means
native = shrinking(screenshot)
blur_image = pre_processing(native)
kmeans = k_means_color_quantization(blur_image)
# Tracking of the main character
if is_first_frame:
# Optional: define a bounding box by mouse
mouse_bbox = cv.selectROI(native, False)
tracker.init(native, mouse_bbox)
first_frame = native.copy()
is_first_frame = False
success, (p1, p2) = tracking_points(native, tracker)
# Draw the tracking in kmeans image copy
tracking = kmeans.copy()
if success:
tracking = cv.rectangle(tracking, p1, p2, (0, 0, 255), 1, 1)
print("Safe tracking")
# Press 't' to redefine the tracking with the initial frame and bbox
redefine_tracking_key = cv.waitKey(30) & 0xff
if redefine_tracking_key == ord("t"):
tracker.init(first_frame, bbox)
print("Redefined tracking")
# @TODO: Future work: applies vgg16 with the images as input
# Images prints
cv.imshow("Native resolution", native)
cv.imshow("Pre-processing", blur_image)
cv.imshow("K-means quantization", kmeans)
cv.imshow("Madeline tracking", tracking)
# If you want save the captured images
# cv.imwrite(output_path + "native/frame_%d.png" % count, native)
# cv.imwrite(output_path + "processed/frame_%d.png" % count, blur_image)
# cv.imwrite(output_path + "kmeans/frame_%d.png" % count, kmeans)
# cv.imwrite(output_path + "tracking/csrt/frame_%d.png" % count, tracking)
count += 1
# Debug the loop rate
print("FPS {}".format(1 / (timestamp.time() - loop_time)))
loop_time = timestamp.time()
cv.destroyAllWindows()
def simple_threshold(image):
ret, thresh1 = cv.threshold(image, 127, 255, cv.THRESH_BINARY)
ret, thresh2 = cv.threshold(image, 127, 255, cv.THRESH_BINARY_INV)
ret, thresh3 = cv.threshold(image, 127, 255, cv.THRESH_TRUNC)
ret, thresh4 = cv.threshold(image, 127, 255, cv.THRESH_TOZERO)
ret, thresh5 = cv.threshold(image, 127, 255, cv.THRESH_TOZERO_INV)
cv.imshow("Binary Threshold", thresh1)
cv.imshow("Binary Threshold Inverted", thresh2)
cv.imshow("Truncated Threshold", thresh3)
cv.imshow("Set to 0", thresh4)
cv.imshow("Set to 0 Inverted", thresh5)
def adaptive_threshold(image):
ret, thresh1 = cv.threshold(image, 127, 255, cv.THRESH_BINARY)
thresh2 = cv.adaptiveThreshold(image, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
thresh3 = cv.adaptiveThreshold(image, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
cv.imshow("Adaptive 01", thresh1)
cv.imshow("Adaptive 02", thresh2)
cv.imshow("Adaptive 03", thresh3)
def otsus_threshold(image):
# Global thresholding
ret, thresh1 = cv.threshold(image, 127, 255, cv.THRESH_BINARY)
# Otsu's thresholding
ret, thresh2 = cv.threshold(image, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv.GaussianBlur(image, (5, 5), 0)
ret, thresh3 = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
cv.imshow("Otsu's 01", thresh1)
cv.imshow("Otsu's 02", thresh2)
cv.imshow("Otsu's 03", thresh3)
# https://docs.opencv.org/4.5.1/d7/d4d/tutorial_py_thresholding.html
def thresholding(image):
simple_threshold(image)
# Adaptive and Otsu's use grayscale image
gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
adaptive_threshold(gray_image)
otsus_threshold(gray_image)
cv.waitKey()
def files_list_sort(images_path):
# Take the origin directory, change to images path, sort and restore to origin directory
initial_path = os.getcwd()
os.chdir(images_path)
files_list = sorted(filter(os.path.isfile, os.listdir(".")), key=os.path.getmtime)
os.chdir(initial_path)
return files_list
# https://docs.opencv.org/4.5.1/d8/d38/tutorial_bgsegm_bg_subtraction.html
# https://docs.opencv.org/4.5.2/d2/d55/group__bgsegm.html
def background_subtraction_type(bs_type):
if bs_type == "MOG2":
back_sub = cv.createBackgroundSubtractorMOG2()
elif bs_type == "KNN":
back_sub = cv.createBackgroundSubtractorKNN()
elif bs_type == "GMG":
back_sub = cv.bgsegm.createBackgroundSubtractorGMG()
elif bs_type == "LSBP":
back_sub = cv.bgsegm.createBackgroundSubtractorLSBP()
elif bs_type == "CNT":
back_sub = cv.bgsegm.createBackgroundSubtractorCNT()
elif bs_type == "GSOC":
back_sub = cv.bgsegm.createBackgroundSubtractorGSOC()
else:
back_sub = cv.bgsegm.createBackgroundSubtractorMOG()
return back_sub
# Background subtraction to a video
def background_subtraction_video_test(video_path, bs_type="MOG2"):
cap = cv.VideoCapture(video_path)
back_sub = background_subtraction_type(bs_type)
while True:
ret, frame = cap.read()
fg_mask = back_sub.apply(frame)
cv.imshow(bs_type, fg_mask)
# Press 'q' to stop
stop_key = cv.waitKey(30) & 0xff
if stop_key == ord("q"):
break
cap.release()
cv.destroyAllWindows()
# Background subtraction for a set of images
def background_subtraction_images_test(images_path, bs_type="MOG2"):
back_sub = background_subtraction_type(bs_type)
files_list = files_list_sort(images_path)
for filename in files_list:
file = os.path.join(images_path, filename)
print(file)
image = cv.imread(file)
fg_mask = back_sub.apply(image)
cv.imshow(bs_type, fg_mask)
# Press 'q' to stop
stop_key = cv.waitKey(30) & 0xff
if stop_key == ord("q"):
break
cv.destroyAllWindows()
# https://pysource.com/2021/01/28/object-tracking-with-opencv-and-python/
def tracking_detection(frame, tracker, back_sub):
height, width, _ = frame.shape
# Extract region of interest
roi = frame[0:height, 0:width]
# Object detection
mask = back_sub.apply(roi)
contours, _ = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
detections = []
for cnt in contours:
# Calculate area and remove small elements
area = cv.contourArea(cnt)
if area > 100:
x, y, w, h = cv.boundingRect(cnt)
detections.append([x, y, w, h])
# Object tracking
boxes_ids = tracker.update(detections)
for box_id in boxes_ids:
x, y, w, h, id = box_id
# cv.putText(roi, str(id), (x, y - 15), cv.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
cv.rectangle(roi, (x, y), (x + w, y + h), (0, 0, 255), 1, 1)
cv.imshow("ROI", roi)
return mask
# Object tracking detection for a set of images
def tracking_detection_images_test(images_path, bs_type="MOG2"):
files_list = files_list_sort(images_path)
# Create tracker and background subtraction
tracker = EuclideanDistTracker()
back_sub = background_subtraction_type(bs_type)
for filename in files_list:
file = os.path.join(images_path, filename)
print(file)
frame = cv.imread(file)
mask = tracking_detection(frame, tracker, back_sub)
cv.imshow("Mask", mask)
# Press 'q' to stop
stop_key = cv.waitKey(30) & 0xff
if stop_key == ord("q"):
break
cv.destroyAllWindows()
# https://scikit-image.org/docs/dev/auto_examples/registration/plot_register_translation.html
def img_reg_phase_cross_correlation_test(frame):
# The shift corresponds to the pixel offset relative to the reference image
shift = (-22.4, 13.32)
offset_image = fourier_shift(np.fft.fftn(frame), shift)
offset_image = np.fft.ifftn(offset_image)
print(f"Known offset (y, x): {shift}")
# Pixel precision first
shift, error, diff_phase = phase_cross_correlation(frame, offset_image)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1)
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(frame, cmap="gray")
ax1.set_axis_off()
ax1.set_title("Reference image")
ax2.imshow(offset_image.real, cmap="gray")
ax2.set_axis_off()
ax2.set_title("Offset image")
# Show the output of a cross-correlation to show what the algorithm is doing behind the scenes
image_product = np.fft.fft2(frame) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print(f"Detected pixel offset (y, x): {shift}")
# Subpixel precision
shift, error, diff_phase = phase_cross_correlation(frame, offset_image, upsample_factor=100)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1)
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(frame, cmap="gray")
ax1.set_axis_off()
ax1.set_title("Reference image")
ax2.imshow(offset_image.real, cmap="gray")
ax2.set_axis_off()
ax2.set_title("Offset image")
# Calculate the upsampled DFT, again to show what the algorithm is doing behind the scenes.
# Constants correspond to calculated values in routine.
cc_image = _upsampled_dft(image_product, 150, 100, (shift * 100) + 75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print(f"Detected subpixel offset (y, x): {shift}")
# See https://github.com/YoshiRi/ImRegPOC to know how apply
def robust_img_reg_poc_test(frame, model):
# result = imregpoc.imregpoc(frame, model)
# print(result.getPerspective())
# result.stitching()
pass
# https://www.geeksforgeeks.org/image-registration-using-opencv-python/
def img_reg_opencv_test(frame, model):
img1_color = frame # Image to be aligned.
img2_color = model # Reference image.
# Convert to grayscale.
img1 = cv.cvtColor(img1_color, cv.COLOR_BGR2GRAY)
img2 = cv.cvtColor(img2_color, cv.COLOR_BGR2GRAY)
height, width = img2.shape
# Create ORB detector with 5000 features.
orb_detector = cv.ORB_create(5000)
# Find keypoints and descriptors.
kp1, d1 = orb_detector.detectAndCompute(img1, None)
kp2, d2 = orb_detector.detectAndCompute(img2, None)
# Match features between the two images.
# Create a brute force matcher with Hamming distance as measurement mode.
matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
# Match the two sets of descriptors.
matches = matcher.match(d1, d2)
# Sort matches on the basis of their Hamming distance.
matches.sort(key=lambda x: x.distance)
# Take the top 90 % matches forward.
matches = matches[:int(len(matches) * 0.9)]
no_of_matches = len(matches)
# Define empty matrices of shape no_of_matches * 2.
p1 = np.zeros((no_of_matches, 2))
p2 = np.zeros((no_of_matches, 2))
for i in range(len(matches)):
p1[i, :] = kp1[matches[i].queryIdx].pt
p2[i, :] = kp2[matches[i].trainIdx].pt
# Find the homography matrix.
homography, mask = cv.findHomography(p1, p2, cv.RANSAC)
# Use this matrix to transform the colored image wrt the reference image.
transformed_img = cv.warpPerspective(img1_color, homography, (width, height))
cv.imshow("Output", transformed_img)
# https://github.com/jagracar/OpenCV-python-tests/blob/master/OpenCV-tutorials/featureDetection/fast.py
def fast_feature_detector_test(frame):
# Initiate FAST object with default values
fast = cv.FastFeatureDetector_create()
# Find and draw the keypoints
kp = fast.detect(frame, None)
img2 = cv.drawKeypoints(frame, kp, None, color=(255, 0, 0))
print("Threshold: ", fast.getThreshold())
print("NonmaxSuppression: ", fast.getNonmaxSuppression())
print("Neighborhood: ", fast.getType())
print("Total Keypoints with nonmaxSuppression: ", len(kp))
cv.imshow("Fast_true", img2)
# Disable nonmaxSuppression
fast.setNonmaxSuppression(0)
kp = fast.detect(frame, None)
print("Total Keypoints without nonmaxSuppression: ", len(kp))
img3 = cv.drawKeypoints(frame, kp, None, color=(255, 0, 0))
cv.imshow("fast_false", img3)
``` |
{
"source": "josezindia/syllus",
"score": 3
} |
#### File: syllus/app/allImports.py
```python
from __future__ import print_function
'''
Include all imports in this file; it will be called at the beginning of all files.
'''
# We need a bunch of Flask stuff
from flask import Flask
from flask import render_template
from flask import redirect
from flask import request
from flask import g
from flask import url_for
from flask import flash
from flask import abort
from flask_admin import Admin
import pprint
from app import models
from models import * # all the database models
import sys
sys.dont_write_bytecode = True
def authUser(env):
envK = "eppn"
if (envK in env): #Grabs env variables from shibboleth
# we need to sanitize the environment variable
# TODO: this looks like a function that can be taken out
return env[envK].split("@")[0].split('/')[-1].lower()
elif ("DEBUG" in app.config) and app.config["DEBUG"]:
old_username = cfg["DEBUG"]["user"]
converted_user = cfg["DEBUG"]["user"].split('@')[0].split('/')[-1].lower()
app.logger.info(old_username+ " converted to "+ converted_user)
return cfg["DEBUG"]["user"].split('@')[0].split('/')[-1].lower()
else:
return None
'''Creates the AbsolutePath based off of the relative path.
Also creates the directories in path if they are not found.
@param {string} relaitivePath - a string of directories found in config.yaml
@param {string} filename - the name of the file that should be in that directory
@return {string} filepath -returns the absolute path of the directory'''
'''TODO: add @param for makeDirs'''
def getAbsolutePath(relaitivePath,filename=None,makeDirs=False):
filepath = os.path.join(sys.path[0],relaitivePath)
if makeDirs == True:
try:
os.makedirs(filepath)
except:
pass
if filename != None:
filepath = os.path.join(filepath,filename)
return filepath
from app import logtool
log = logtool.Log()
''' Creates an Flask object; @app will be used for all decorators.
from: http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/
"A decorator is just a callable that takes a function as an argument and
returns a replacement function. See start.py for an example"
'''
app = Flask(__name__)
#from app import app
app.config.from_object('settings')
admin = Admin(app)
# Builds all the database connections on app run
# Don't panic, if you need clarification ask.
@app.before_request
def before_request():
g.dbMain = mainDB.connect()
@app.teardown_request
def teardown_request(exception):
dbM = getattr(g, 'dbMain', None)
if (dbM is not None) and (not dbM.is_closed()):
dbM.close()
```
#### File: syllus/app/archive.py
```python
from allImports import *
from app.logic import databaseInterface
from app.logic.getAuthUser import AuthorizedUser
from app.logic.getAll import GetAll
@app.route("/archive/", defaults={'SEID': None}, methods = ["GET", "POST"])
@app.route("/archive/<SEID>", methods = ["GET", "POST"])
def archive(SEID):
# we need to know if the user is authorized to see this
authorizedUser = AuthorizedUser()
getAll = GetAll()
semesters = databaseInterface.get_all_semesters()
if SEID == None:
SEID = databaseInterface.grab_current_semester()
two_dictionaries = getAll.create_dictionaries(SEID)
current_term = Semesters.get(Semesters.SEID == SEID)
return render_template("archive.html",
cfg = cfg,
semesters = semesters,
current_term = current_term,
SEID = SEID,
isAdmin = authorizedUser.isAdmin,
divisions_to_programs = two_dictionaries[0],
programs_to_courses = two_dictionaries[1]
)
```
#### File: syllus/app/databaseAdmin.py
```python
from allImports import *
from flask_admin.contrib.peewee import ModelView
class AuthenticatedUser(ModelView):
column_display_pk = True
def is_accessible(self):
return authUser(request.environ) == cfg['databaseAdmin']['user']
admin.add_view(AuthenticatedUser(Semesters))
admin.add_view(AuthenticatedUser(Divisions))
admin.add_view(AuthenticatedUser(Programs))
admin.add_view(AuthenticatedUser(Users))
admin.add_view(AuthenticatedUser(Courses))
admin.add_view(AuthenticatedUser(UsersCourses))
admin.add_view(AuthenticatedUser(Deadline))
```
#### File: syllus/app/loadConfig.py
```python
import yaml, os
#For Logging
import logging
def load_config(file):
with open(file, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
return cfg
```
#### File: app/logic/databaseInterface.py
```python
from app.allImports import *
def grab_current_semester():
semesters = Semesters.select()
current = 0
for semester in semesters:
if semester.SEID > current:
current = semester.SEID
return current
def grab_all_divisions():
peeweeObj = Divisions.select().order_by(+Divisions.DID)
return peeweeObj
def grab_programs_in_division(DID):
peeweeObj = (Programs.select().where(Programs.DID == DID))
return peeweeObj
def grab_courses_in_program(PID,SEID):
peeweeObj = (UsersCourses
.select()
.join(Courses)
.order_by(+UsersCourses.username, +Courses.prefix, +Courses.number)
.where(
UsersCourses.CID == Courses.CID,
Courses.PID == PID,
Courses.SEID == SEID
))
return peeweeObj
def grab_my_courses(username,SEID):
my_courses = (UsersCourses
.select()
.join(Courses)
.where(
UsersCourses.username == username,
UsersCourses.CID == Courses.CID,
Courses.SEID == SEID
))
if my_courses.exists(): #checking whether query contains courses
peeweeObj = my_courses.execute()
return peeweeObj
else:
return None
def get_all_semesters():
semesters = Semesters.select()
return semesters
def get_division(DID):
division = Divisions.get(Divisions.DID == DID)
return division
def get_program(PID):
program = Programs.get(Programs.PID == PID)
return program
def get_course_info(CID):
course = (Courses
.select()
.join(Programs)
.join(Divisions)
.where(
Courses.CID == CID,
Courses.PID == Programs.PID,
Programs.DID == Divisions.DID
)).get()
return course
def get_course_file_path(CID):
course = get_course_info(CID)
file_path = str(cfg['fileOperations']['dataPaths']['uploads']) + str(course.filePath)
return file_path
def get_course_download_file_path(CID):
course = get_course_info(CID)
file_path = str(cfg['fileOperations']['dataPaths']['download']) + str(course.filePath)
return file_path
def get_course_instructors(CID):
instructors_string = ''
instructors = UsersCourses.select().where(UsersCourses.CID == CID)
for instructor in instructors:
print(instructor.username.username)
instructors_string += instructor.username.username
return instructors_string
def get_non_admins():
users = Users.select().where(Users.isAdmin == 0)
return users
def get_all_admins():
admins = Users.select().where(Users.isAdmin == 1)
return admins
def check_strings(strings):
for string in strings:
if string != '' and string is not None:
result = isinstance(string,str)
if result == False:
return False
else:
return False
return True
def check_integers(integers):
for integer in integers:
result = isinstance(integer,int)
if result == False:
return False
return True
def insert_course(pre,num,sect,pid,seid):
try:
string_result = check_strings([pre,num,sect])
integer_result = check_integers([pid,seid])
if string_result == True and integer_result == True:
new_course = Courses(prefix=pre,number=num,section=sect,PID=pid,SEID=seid)
new_course.save()
else:
return False
return new_course
except Exception as e:
return False
def insert_course_user(un,cid):
try:
newInsert = UsersCourses(username=un,CID=cid)
newInsert.save()
if newInsert:
return newInsert
else:
return False
except Exception as e:
return False
```
#### File: app/logic/excelMaker.py
```python
import xlsxwriter
from app.allImports import *
import sys
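# Builds an .xlsx listing every instructor with at least one course missing a syllabus for the given semester; one row per instructor, with the missing courses spread across columns E onward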
def makeExcelFile(SEID):
filename = "syllus-{}-missing-syllabi.xlsx".format(SEID)
path = getAbsolutePath(cfg['fileOperations']['dataPaths']['tmp'],filename,False)
workbook = xlsxwriter.Workbook(path)
workbook.set_properties({
'title': 'Missing Syllabi for {}'.format(SEID),
'author': '<NAME>',
'comments': 'Created with Python and XlsxWriter'
})
master_worksheet = workbook.add_worksheet('All Courses')
master_worksheet.write('A1','Username')
master_worksheet.write('B1', 'First Name')
master_worksheet.write('C1', 'Last Name')
master_worksheet.write('D1', 'Email')
master_worksheet.write('E1', 'Course(s)')
master_row = 2
users = UsersCourses.select(UsersCourses.username).distinct().join(Courses).where(
Courses.filePath >> None,
Courses.SEID== SEID).order_by(
UsersCourses.username)
for user in users:
master_worksheet.write('A{}'.format(master_row),user.username.username)
master_worksheet.write('B{}'.format(master_row),user.username.firstName)
master_worksheet.write('C{}'.format(master_row),user.username.lastName)
master_worksheet.write('D{}'.format(master_row),user.username.email)
courses = UsersCourses.select().join(Courses).where(
UsersCourses.username == user.username.username,
Courses.filePath >> None,
Courses.SEID == SEID)
colLetter = 'D'
for c in courses:
colLetter = chr(ord(colLetter) + 1)
#Turns value in to next letter up
#E.G. D -> E
colName = colLetter + '{}'
course_info = c.CID.prefix+'-'+c.CID.number+'-'+c.CID.section
master_worksheet.write(colName.format(master_row),course_info)
master_row += 1
workbook.close()
return path
```
#### File: app/logic/getAuthUser.py
```python
from app.allImports import *
class AuthorizedUser:
def __init__(self):
self.username = authUser(request.environ)
self.isAdmin = self.get_user().isAdmin
def get_username(self):
'''returns the username of the user'''
return self.username
def get_user(self):
'''returns the user object corresponding to the logged-on user'''
user = Users.select().where(Users.username == self.username)
if user.exists():
return user[0]
else:
abort(403)
def user_level(self):
user = self.get_user()
try:
if user.isAdmin:
return 'admin'
elif user.PID is not None:
return 'program'
elif user.DID is not None:
return 'division'
else:
return 'faculty'
except:
return "error"
``` |
{
"source": "josezy/ikaro",
"score": 3
} |
#### File: ikaro/middleware/http2_middleware.py
```python
from django.conf import settings
PRELOAD_AS = {
'js': 'script',
'css': 'style',
'png': 'image',
'jpg': 'image',
'jpeg': 'image',
'webp': 'image',
'svg': 'image',
'gif': 'image',
'ttf': 'font',
'woff': 'font',
'woff2': 'font'
}
PRELOAD_ORDER = {
'css': 0,
'ttf': 1,
'woff': 1,
'woff2': 1,
'js': 2,
}
FILE_FILTER = getattr(settings, 'CLOUDFLARE_PUSH_FILTER', lambda x: True)
def record_file_to_preload(request, url):
"""save a staticfile to the list of files to push via HTTP2 preload"""
if not hasattr(request, 'to_preload'):
request.to_preload = set()
request.to_preload.add(url)
def create_preload_header(urls):
"""Compose the Link: header contents from a list of urls"""
without_vers = lambda url: url.split('?', 1)[0]
extension = lambda url: url.rsplit('.', 1)[-1].lower()
preload_priority = lambda url: PRELOAD_ORDER.get(url[1], 100)
urls_with_ext = ((url, extension(without_vers(url))) for url in urls)
sorted_urls = sorted(urls_with_ext, key=preload_priority)
preload_tags = (
f'<{url}>; rel=preload; crossorigin; as={PRELOAD_AS[ext]}'
if ext in PRELOAD_AS else
f'<{url}>; rel=preload; crossorigin'
for url, ext in sorted_urls
)
return ', '.join(preload_tags)
def HTTP2PushMiddleware(get_response):
def middleware(request):
"""Attach a Link: header containing preload links for every staticfile
referenced during the request by the {% http2static %} templatetag
"""
response = get_response(request)
if hasattr(request, 'to_preload'):
response['Link'] = create_preload_header(request.to_preload)
return response
return middleware
```
#### File: ikarodjango/panel/consumers.py
```python
import time
import asyncio
from channels.consumer import AsyncConsumer
from channels.exceptions import StopConsumer
from channels.db import database_sync_to_async
from panel.models import Drone, Room
from panel.utils import is_pilot
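# Relays MAVLink traffic through a per-room channel-layer group: drones connect by plate and may always send,
# while room connections are read-only unless the authenticated user is the room's pilot.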
class MavlinkConsumer(AsyncConsumer):
can_receive = False
@database_sync_to_async
def get_drone_room(self, plate):
drone_qs = Drone.objects.filter(plate=plate.upper())
if not drone_qs.exists():
return None
return drone_qs[0].room
@database_sync_to_async
def get_room(self, room_id):
room_qs = Room.objects.filter(id__startswith=room_id)
if not room_qs.exists():
return None
return room_qs[0]
@database_sync_to_async
def add_viewer(self, n):
room = Room.objects.get(id__startswith=self.room_id)
room.total_viewers += n
room.save()
async def websocket_connect(self, event):
self.user = self.scope["user"]
type = self.scope["url_route"]["kwargs"].get("type", None)
id = self.scope["url_route"]["kwargs"].get("id", None)
if type == "room":
room = await self.get_room(id)
elif type == "plate":
self.can_receive = True
room = await self.get_drone_room(id)
else:
print("REJECTING CONNECTION", flush=True)
return await self.send({"type": "websocket.close"})
if not room:
print("REJECTING CONNECTION: no room", flush=True)
return await self.send({"type": "websocket.close"})
self.room_id = room.short_id
pilot = await database_sync_to_async(is_pilot)(self.room_id, self.scope["user"].id)
if self.user.is_authenticated and pilot:
self.can_receive = True
await self.send({"type": "websocket.accept"})
await self.channel_layer.group_add(
self.room_id,
self.channel_name
)
# await self.add_viewer(1)
async def websocket_receive(self, event):
if not self.can_receive:
return
mavmsg = event.get('text', None)
if mavmsg is not None:
await self.channel_layer.group_send(
self.room_id,
{
"type": "flight_message",
"mavmsg": mavmsg,
"sender_channel_name": self.channel_name
}
)
if "HEARTBEAT" in mavmsg:
print(f"[WS RECV] {time.ctime()} {self.channel_name}: {mavmsg}", flush=True)
async def flight_message(self, event):
if self.channel_name != event.get("sender_channel_name"):
mavmsg = event.get("mavmsg")
await self.send({
"type": "websocket.send",
"text": mavmsg
})
async def websocket_disconnect(self, event):
print("Gracefully disconnecting....")
await self.channel_layer.group_discard(
self.room_id,
self.channel_name
)
await self.send({"type": "websocket.close"})
# await self.add_viewer(-1)
raise StopConsumer()
```
#### File: ikarodjango/panel/tests.py
```python
# from django.test import TestCase
# from channels.db import database_sync_to_async
# # from channels.testing import HttpCommunicator
# from channels.testing import WebsocketCommunicator
# # from panel.consumers import PanelConsumer
# from channels.routing import get_default_application
# from ikaro.models import User
# from panel.models import Drone, Room
# class BaseTest(TestCase):
# def setUp(self) -> None:
# self. user = User.objects.create_user(
# username="jose", email="<EMAIL>", password="<PASSWORD>")
# self.drone = Drone.objects.create(plate="00000000", owner=self.user)
# self.room = Room.objects.create(drone=self.drone, host=self.user)
# class PanelConsumerTests(BaseTest):
# @database_sync_to_async
# def blah(self):
# print("self.room", self.room)
# print(f"ROOMS == ", Room.objects.all())
# async def test_consumer(self) -> None:
# communicator = WebsocketCommunicator(
# get_default_application(), f"/mavlink/{self.room.short_id}")
# communicator.scope['user'] = self.user # Trick to login
# await self.blah()
# connected, subprotocol = await communicator.connect()
# assert connected
# # Test sending text
# await communicator.send_to(text_data="hello")
# response = await communicator.receive_from()
# assert response == "hello"
# # Close
# await communicator.disconnect()
# # communicator = HttpCommunicator(PanelConsumer, "GET", "/test/")
# # response = await communicator.get_response()
# # self.assertEqual(response["body"], b"test response")
# # self.assertEqual(response["status"], 200)
```
#### File: ikarodjango/panel/utils.py
```python
from panel.models import Room
def is_pilot(room_id, user_id) -> bool:
room = Room.objects.get(id__startswith=room_id)
return room.drone.owner.id == user_id or room.host.id == user_id
```
#### File: ikarodjango/ui/urls.py
```python
from django.urls import path, include
from django.http import HttpResponse
from panel.models import Room
from panel.views import FlightPanel, Spectators
from ui.views.pages import Looby
from ui.views.accounts import Login, Logout, Signup
def reset_viewers_count():
for room in Room.objects.all():
room.total_viewers = 0
room.save()
# reset_viewers_count()
urlpatterns = [
path('', Looby.as_view(), name='home'),
path('accounts/login/', Login.as_view(), name='login'),
path('accounts/signup/', lambda r: HttpResponse(
"Error", content_type="text/plain"), name='signup'),
path('genesis/', Signup.as_view()),
path('accounts/logout/', Logout.as_view(), name='logout'),
path('accounts/', include('django.contrib.auth.urls')),
path('flight/<str:id>', FlightPanel.as_view(), name='flight_room'),
path('spectators/<str:id>', Spectators.as_view()),
]
``` |
{
"source": "josezy/tukano",
"score": 3
} |
#### File: tukano/src/timer.py
```python
import time
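# Minimal named-interval scheduler: call update_elapsed_times() once per loop, then time_to(name)
# returns True (and resets that timer) when the configured interval has elapsed.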
class Timer:
def __init__(self, timers={}):
now = time.time()
self.timers = timers
self.last_tss = {tn: now for tn in timers.keys()}
def update_elapsed_times(self):
now = time.time()
self.elapsed_times = {
tn: now - self.last_tss[tn] for tn in self.timers.keys()
}
def time_to(self, timer_name):
assert timer_name in self.timers, \
f"Timer {timer_name} does not exists: {self.timers}"
if self.elapsed_times[timer_name] > self.timers[timer_name]:
self.last_tss[timer_name] = time.time()
return True
return False
```
#### File: tukano/src/tukano_service.py
```python
import ssl
import json
import time
import settings
import logging
import traceback
import websocket
import typing as ty
from pymavlink import mavutil
from websocket import create_connection
from tasks import collect_data, prepare_data
from camera import Camera
from actuators import Hook
from util import leds
from timer import Timer
def connect_drone():
while True:
try:
drone = mavutil.mavlink_connection(**settings.MAVLINK_TUKANO)
break
except Exception as e:
logging.warning(f"MAVLink vehicle connection failed: {e}")
logging.warning("Retrying...")
return drone
def is_trustable(msg, vehicle):
return all([
msg.get_srcSystem() == vehicle['system_id'],
msg.get_srcComponent() == vehicle['component_id']
])
def cleanup_msg(link):
msg = link.recv_msg()
msg_type = msg and msg.get_type()
message_is_valid = msg_type and msg_type != 'BAD_DATA'
return msg if message_is_valid else None
def update_vehicle_state(msg, vehicle):
if is_trustable(msg, vehicle):
msg_type = msg.get_type()
if msg_type == 'HEARTBEAT':
vehicle['armed'] = bool(msg.base_mode & 2**7)
vehicle['system_id'] = msg.get_srcSystem()
vehicle['component_id'] = msg.get_srcComponent()
if msg_type == 'GLOBAL_POSITION_INT':
vehicle['position'] = {
'lat': float(msg.lat) / 10**7,
'lon': float(msg.lon) / 10**7,
'alt': float(msg.alt) / 10**3,
}
if msg_type == "SYS_STATUS":
vehicle['battery'] = msg.battery_remaining
return vehicle
def create_cloud_link(endpoint):
try:
logging.info(f"Creating cloud link at {endpoint}")
link = create_connection(endpoint, **settings.WS_CONNECTION_PARAMS)
link.settimeout(0)
logging.info("Connected!")
return link
except Exception as e:
logging.error(f"Cloud link error: {e}")
def mav_data_to_cloud(link, msg) -> None:
if msg.get_type() not in settings.WS_MAV_MSG_TYPES:
return
try:
link.send(json.dumps({
'srcSystem': msg.get_srcSystem(),
'srcComponent': msg.get_srcComponent(),
**msg.to_dict()
}))
if msg.get_type() == "HEARTBEAT":
logging.debug(f"[MAV DATA TO CLOUD] {msg.to_json()}")
except (
BrokenPipeError,
websocket.WebSocketConnectionClosedException,
OSError,
) as e:
logging.error(f"[MAV DATA SEND] Cloud link error: {e}")
link.close()
def mav_data_from_cloud(link):
mavmsg = None
try:
recv_str = link.recv()
msg = json.loads(recv_str)
if any([
'command' in msg,
'message' in msg,
]):
mavmsg = msg
logging.debug(f"[MAV DATA FROM CLOUD] {recv_str}")
except (
BrokenPipeError,
websocket.WebSocketConnectionClosedException,
ConnectionResetError,
) as e:
logging.error(f"[MAV DATA RECV] Broken pipe. Cloud link error: {e}")
except (
BlockingIOError,
json.JSONDecodeError,
ssl.SSLWantReadError,
OSError,
):
pass
return mavmsg
def command_to_drone(drone, command: ty.Dict[str, ty.Any]) -> None:
mavcmd = command.get('command')
target_system = command.get('target_system')
target_component = command.get('target_component')
params = command.get('params')
params = [params.get(f"param{i+1}", 0) for i in range(7)]
drone.mav.command_long_send(
target_system,
target_component,
getattr(mavutil.mavlink, mavcmd),
0, # confirmation (not used yet)
*params
)
logging.debug(f"[COMMAND TO DRONE] Delivered command {mavcmd} with params: {params}")
def message_to_drone(drone, message: ty.Dict[str, ty.Any]) -> None:
mavmsg = message.get('message')
params = message.get('params')
args = [
val.encode() if type(val) == str else val
for val in params.values()
]
mavmsg_send = getattr(drone.mav, f"{mavmsg.lower()}_send")
mavmsg_send(*args)
logging.debug(f"[MESSAGE TO DRONE] Delivered message {mavmsg} with args: {args}")
def tukano_command(command: ty.Dict[str, ty.Any]) -> None:
tukano_cmd = command.get('command')
# params = command.get('params')
if tukano_cmd == 'TUKANO_RELEASE_HOOK':
hook.release()
# =============[From here to down hell]================
logging.basicConfig(**settings.LOGGING_KWARGS)
logging.info(f"Initialising vehicle at {settings.MAVLINK_TUKANO['device']}")
leds.led_on('red')
drone = connect_drone()
heartbeat = drone.wait_heartbeat()
logging.info("Hearbeat received!")
vehicle = {
'system_id': heartbeat.get_srcSystem(),
'component_id': heartbeat.get_srcComponent(),
'armed': False,
'position': None,
'battery': None,
}
drone.mav.request_data_stream_send(
vehicle['system_id'],
vehicle['component_id'],
mavutil.mavlink.MAV_DATA_STREAM_ALL,
10, # Rate in Hertz
1 # Start/Stop
)
hook = Hook()
timer = Timer({
'collect_data': settings.DATA_COLLECT_TIMESPAN,
'send_data': settings.MAVLINK_SAMPLES_TIMESPAN,
'take_pic': settings.TAKE_PIC_TIMESPAN,
})
if any((settings.TAKE_PIC, settings.RECORD)):
cam = Camera()
cloud_mav_link = create_cloud_link(settings.WS_MAV_ENDPOINT)
cloud_last_heartbeat = time.time()
leds.led_on('blue')
red_on = False
while True:
time.sleep(settings.SLEEPING_TIME)
try:
mav_msg = cleanup_msg(drone)
if mav_msg is not None:
vehicle = update_vehicle_state(mav_msg, vehicle)
if cloud_mav_link is not None and cloud_mav_link.connected:
if mav_msg is not None:
mav_data_to_cloud(cloud_mav_link, mav_msg)
cloud_data = mav_data_from_cloud(cloud_mav_link)
if cloud_data and 'command' in cloud_data:
if cloud_data['command'].startswith('TUKANO'):
tukano_command(cloud_data)
else:
command_to_drone(drone, cloud_data)
if cloud_data and 'message' in cloud_data:
if cloud_data.get('message') == 'HEARTBEAT':
cloud_last_heartbeat = time.time()
leds.led_on('green')
red_on = False
message_to_drone(drone, cloud_data)
else:
logging.error("No cloud_mav_link, recreating...")
cloud_mav_link = create_cloud_link(settings.WS_MAV_ENDPOINT)
time.sleep(1)
if time.time() - cloud_last_heartbeat > 2 and not red_on:
leds.led_on('red')
red_on = True
# =================[ Tasks ]=================
timer.update_elapsed_times()
if settings.DATA_COLLECT:
if timer.time_to('collect_data'):
if vehicle['position']:
veh_alt = vehicle['position']['alt']
if veh_alt > settings.DATA_COLLECT_MIN_ALT:
collect_data(vehicle['position'])
if timer.time_to('send_data'):
package = prepare_data()
if package:
pack_len = len(package)
logging.debug(f"Sending data ({pack_len}): {package}")
if pack_len > 1048:
logging.warning("Message too long: Truncating...")
# drone.mav.tukano_data_send(package)
# logging.info("Data sent to ground")
tukano_msg = drone.mav.tukano_data_encode(package)
mav_data_to_cloud(cloud_mav_link, tukano_msg)
logging.info("Data sent to cloud")
if settings.TAKE_PIC and timer.time_to('take_pic'):
if vehicle['armed'] and vehicle['battery'] > 30:
if vehicle['position']:
pic_name = cam.take_pic(gps_data={
'lat': vehicle['position']['lat'],
'lon': vehicle['position']['lon'],
'alt': vehicle['position']['alt']
})
else:
pic_name = cam.take_pic()
logging.info(f"Pic taken '{pic_name}'")
if settings.RECORD:
if vehicle['armed'] and not cam.is_recording:
vid_name = cam.start_recording()
logging.info(f"Recording video '{vid_name}'")
if not vehicle['armed'] and cam.is_recording:
vid_name = cam.stop_recording()
logging.info(f"Video recordered '{vid_name}'")
except Exception as e:
leds.led_on('red')
logging.error(f"Main loop error: {e}")
traceback.print_exc()
```
#### File: src/util/leds.py
```python
import sys
sys.path.append("..")
import settings
from settings import LED_PINS
if settings.PROD:
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
for led_pin in LED_PINS.values():
GPIO.setup(led_pin, GPIO.OUT)
def led_on(color):
print(f"{color} led ON")
if settings.PROD:
for led_color, led_pin in LED_PINS.items():
state = GPIO.HIGH if led_color == color else GPIO.LOW
GPIO.output(led_pin, state)
def led_off():
print("Leds OFF")
if settings.PROD:
for _, led_pin in LED_PINS.items():
GPIO.output(led_pin, GPIO.LOW)
def error():
led_on('red')
def info():
led_on('blue')
def success():
led_on('green')
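# Usage sketch: cycle the status colours from application code. Assumes the
# settings module and LED_PINS are importable as above; on non-PROD hosts the
# helpers only print, on the Pi they drive the GPIO pins configured earlier.
if __name__ == "__main__":
    info()     # blue while initialising
    success()  # green once everything is healthy
    error()    # red on failure
    led_off()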
``` |
{
"source": "josflesan/SkunkBooth",
"score": 2
} |
#### File: skunkbooth/utils/fileIO.py
```python
import logging
from os import path
from sys import platform
from typing import List, Tuple
from cv2 import VideoWriter, VideoWriter_fourcc, imwrite
from numpy import array
from PIL import Image, ImageDraw, ImageFont
from skunkbooth.data import constants
class IOBase:
"""ASCII conversion module, subclass for access to convert()"""
def __init__(self, font: str = "Hack", fontSize: int = 30):
self.fx, self.fy = 0, 0
self.setFont(font, fontSize)
self.maxCache = 5000
def setFont(self, fp: str, size: int) -> None:
"""Set render font, fall back to Input if font name is invalid"""
fp = f'{path.join(path.dirname(path.abspath(__file__)), "..", "data", fp)}.ttf'
try:
self.font = ImageFont.truetype(fp, size)
except OSError:
logging.error(f"Font {fp} not found, falling back.")
fp = f'{path.join(path.dirname(path.abspath(__file__)), "..", "data", "Hack.ttf")}'
self.font = ImageFont.truetype(fp, size)
px, py = self.fx, self.fy
self.fx, self.fy = self.font.getsize("g")
self.glyphs = {}
self.renderCache = {}
self.bold = max(1, size // 30)
if px != self.fx or py != self.fy:
self.colours = {i: Image.new("RGB", (self.fx, self.fy)) for i in range(256)}
for i in self.colours:
self.colours[i].paste(
tuple(reversed(constants.palette[i * 3:i * 3 + 3])),
(0, 0, self.fx, self.fy),
)
self.underline = Image.new("L", (self.fx, self.fy))
self.underline.paste(255, (0, self.fy - self.fy // 11, self.fx, self.fy))
def write(self, image: List[List[Tuple[int, int, int]]]) -> bool:
"""Template method for write()"""
return False
def convert(self, image: List[List[Tuple[int, int, int, int]]]) -> array:
"""
Converts ASCII images to opencv format images to use in opencv writers.
Pixels formatted as (keycode, attribute, 8 bit colour)
"""
ay = len(image)
ax = len(image[0])
out = Image.new("RGB", (ax * self.fx, ay * self.fy))
for y in range(ay):
for x in range(ax):
# Just draw background if character is empty
pixel = image[y][x]
if len(pixel) == 1:
out.paste(self.colours[pixel[0]], (x * self.fx, y * self.fy))
continue
# Render character, (text colour, bg colour, char, attribute)
if pixel in self.renderCache:
render = self.renderCache[pixel]
else:
fg = self.colours[pixel[2]]
bg = self.colours[pixel[3]]
attr = pixel[1]
char = pixel[0]
# Attributes
charID = char + "bd" if attr in constants.L_BOLD else char
if charID in self.glyphs:
glyph = self.glyphs[charID]
else:
# Cache character glyphs
glyph = Image.new("L", (self.fx, self.fy))
ImageDraw.Draw(glyph).text(
(0, 0),
char,
255,
font=self.font,
stroke_width=self.bold if charID[-2:] == "bd" else 0,
)
self.glyphs[charID] = glyph
if attr in constants.L_REVERSE:
fg, bg = bg, fg
if attr in constants.L_UNDERLINE:
glyph = glyph.copy()
glyph.paste(self.underline, (0, 0), self.underline)
render = self.renderCache[pixel] = Image.composite(fg, bg, glyph)
if len(self.renderCache) > self.maxCache:
self.renderCache.pop(next(iter(self.renderCache)))
out.paste(render, (x * self.fx, y * self.fy))
return array(out)
class VideoIO(IOBase): # TODO: Other video filetypes
"""ASCII to video saver"""
def __init__(
self,
dim: Tuple[int, int] = None,
fps: int = 60,
dest: str = "gallery/out.avi",
**kwargs,
):
"""
Set image dimensions and destination, dimensions must remain constant while recording.
dim: Character dimensions of the ASCII video. Inferred from first frame if missing
dest: File destination to save to.
"""
super().__init__(**kwargs)
if dim is None:
self.dest = dest
self.fps = fps
else:
self.dest = VideoWriter(
dest,
VideoWriter_fourcc(*"DIVX"),
fps,
(dim[0] * self.fx, dim[1] * self.fy),
)
def write(self, image: List[List[Tuple[int, int, int]]]) -> bool:
"""Write a frame to the video."""
if self.dest is None:
raise Exception("Attempted write to closed file")
elif isinstance(self.dest, str):
self.dest = VideoWriter(
self.dest,
VideoWriter_fourcc(*"DIVX"),
self.fps,
(len(image[0]) * self.fx, len(image) * self.fy),
)
try:
self.dest.write(self.convert(image))
return True
except Exception:
return False
def close(self) -> None:
"""Close and save the video file."""
try:
self.dest.release()
except Exception:
return
self.dest = None
class ImageIO(IOBase):
"""ASCII to image saver"""
def __init__(self, dest: str = "Gallery/SaveImage.jpg", **kwargs):
super().__init__(**kwargs)
self.dest = dest
def write(self, image: List[List[Tuple[int, int, int]]], dest: str = None) -> bool:
"""For writing image to file"""
return imwrite(dest if dest else self.dest, self.convert(image))
class AsciiIO(IOBase):
"""ASCII to text saver"""
def __init__(self, dest: str = "out", **kwargs):
super().__init__(**kwargs)
self.dest = dest
if platform == "win32":
self.dest += ".txt"
def write(self, image: List[List[Tuple[int, int, int]]], dest: str = None) -> bool:
"""For writing image to file"""
with open(dest if dest else self.dest, "w") as f:
return f.write("\n".join("".join(j[0] for j in i) for i in image))
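# Usage sketch (synthetic frame, illustrative paths): each cell is either a
# 1-tuple (background colour index) or a 4-tuple of (char, attribute,
# foreground index, background index), matching what convert() expects above.
if __name__ == "__main__":
    frame = [[("#", 0, 7, 0) for _ in range(20)] for _ in range(5)]
    ImageIO(dest="demo.jpg").write(frame)
    AsciiIO(dest="demo_ascii").write(frame)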
``` |
{
"source": "josflorap/robotframework-tidy",
"score": 2
} |
#### File: robotframework-tidy/robotidy/api.py
```python
from typing import Optional
from robotidy.app import Robotidy
from robotidy.cli import find_and_read_config, TransformType, validate_regex
from robotidy.files import DEFAULT_EXCLUDES
from robotidy.utils import GlobalFormattingConfig
class RobotidyAPI(Robotidy):
def __init__(self, src: str, output: Optional[str], **kwargs):
config = find_and_read_config((src,))
config = {k: str(v) if not isinstance(v, (list, dict)) else v for k, v in config.items()}
converter = TransformType()
transformers = [converter.convert(tr, None, None) for tr in config.get("transform", ())]
configurations = [converter.convert(c, None, None) for c in config.get("configure", ())]
formatting_config = GlobalFormattingConfig(
space_count=kwargs.get("spacecount", None) or int(config.get("spacecount", 4)),
separator=kwargs.get("separator", None) or config.get("separator", "space"),
line_sep=config.get("lineseparator", "native"),
start_line=kwargs.get("startline", None) or int(config["startline"]) if "startline" in config else None,
end_line=kwargs.get("endline", None) or int(config["endline"]) if "endline" in config else None,
)
exclude = config.get("exclude", None)
extend_exclude = config.get("extend_exclude", None)
exclude = validate_regex(exclude if exclude is not None else DEFAULT_EXCLUDES)
extend_exclude = validate_regex(extend_exclude)
super().__init__(
transformers=transformers,
transformers_config=configurations,
src=(),
exclude=exclude,
extend_exclude=extend_exclude,
overwrite=False,
show_diff=False,
formatting_config=formatting_config,
verbose=False,
check=False,
output=output,
force_order=False,
)
def transform_model(model, root_dir: str, output: Optional[str] = None, **kwargs) -> Optional[str]:
"""
:param model: The model to be transformed.
:param root_dir: Root directory. Configuration file is searched based
on this directory or one of its parents.
:param output: Path where transformed model should be saved
:param kwargs: Default values for global formatting parameters
such as ``spacecount``, ``startline`` and ``endline``.
:return: The transformed model converted to string or None if no transformation took place.
"""
transformer = RobotidyAPI(root_dir, output, **kwargs)
diff, _, new_model = transformer.transform(model)
if not diff:
return None
return new_model.text
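# Usage sketch (the suite path is illustrative): parse a file with robot.api,
# run the configured transformers over it and print the result if it changed.
if __name__ == "__main__":
    from robot.api import get_model
    model = get_model("tests/suite.robot")
    transformed = transform_model(model, root_dir=".", spacecount=4)
    if transformed is not None:
        print(transformed)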
```
#### File: robotidy/transformers/AlignTestCases.py
```python
from robot.api.parsing import (
ModelTransformer,
Token,
EmptyLine,
Comment,
ModelVisitor,
ForHeader,
End,
IfHeader,
ElseHeader,
ElseIfHeader,
)
from robotidy.decorators import check_start_end_line
from robotidy.utils import round_to_four, is_suite_templated
class AlignTestCases(ModelTransformer):
"""
Align Test Cases to columns.
Currently only templated tests are supported. Following code:
        *** Test Cases ***    baz    qux
        # some comment
        test1    hi    hello
        test2 long test name    asdfasdf    asdsdfgsdfg
            bar1  bar2
    will be transformed to:
        *** Test Cases ***      baz         qux
        # some comment
        test1                   hi          hello
        test2 long test name    asdfasdf    asdsdfgsdfg
                                bar1        bar2
    If you don't want to align a test case section that does not contain header names (in the above example baz and qux are
    header names) then configure the `only_with_headers` parameter:
        robotidy -c AlignTestCases:only_with_headers=True <src>
Supports global formatting params: ``--startline``, ``--endline``.
See https://robotidy.readthedocs.io/en/latest/transformers/AlignTestCases.html for more examples.
"""
ENABLED = False
def __init__(self, only_with_headers: bool = False):
self.only_with_headers = only_with_headers
self.templated = False
self.widths = None
self.test_name_len = 0
self.name_line = 0
self.indent = 0
def visit_File(self, node): # noqa
if not is_suite_templated(node):
return node
return self.generic_visit(node)
def visit_If(self, node): # noqa
self.indent += 1
self.generic_visit(node)
self.indent -= 1
return node
def visit_Else(self, node): # noqa
self.indent += 1
self.generic_visit(node)
self.indent -= 1
return node
def visit_ElseIf(self, node): # noqa
self.indent += 1
self.generic_visit(node)
self.indent -= 1
return node
def visit_For(self, node): # noqa
self.indent += 1
self.generic_visit(node)
self.indent -= 1
return node
@check_start_end_line
def visit_TestCaseSection(self, node): # noqa
if len(node.header.data_tokens) == 1 and self.only_with_headers:
return node
counter = ColumnWidthCounter(self.formatting_config)
counter.visit(node)
self.widths = counter.widths
return self.generic_visit(node)
@check_start_end_line
def visit_Statement(self, statement): # noqa
if statement.type == Token.TESTCASE_NAME:
self.test_name_len = len(statement.tokens[0].value)
self.name_line = statement.lineno
elif statement.type == Token.TESTCASE_HEADER:
self.align_header(statement)
elif not isinstance(
statement,
(Comment, EmptyLine, ForHeader, IfHeader, ElseHeader, ElseIfHeader, End),
):
self.align_statement(statement)
return statement
def align_header(self, statement):
tokens = []
for index, token in enumerate(statement.data_tokens[:-1]):
tokens.append(token)
separator = (self.widths[index] - len(token.value) + 4) * " "
tokens.append(Token(Token.SEPARATOR, separator))
tokens.append(statement.data_tokens[-1])
tokens.append(statement.tokens[-1]) # eol
statement.tokens = tokens
return statement
def align_statement(self, statement):
tokens = []
for line in statement.lines:
strip_line = [t for t in line if t.type not in (Token.SEPARATOR, Token.EOL)]
line_pos = 0
exp_pos = 0
widths = self.get_widths(statement)
for token, width in zip(strip_line, widths):
exp_pos += width + 4
if self.test_name_len:
if self.name_line == statement.lineno:
exp_pos -= self.test_name_len
self.test_name_len = 0
tokens.append(Token(Token.SEPARATOR, (exp_pos - line_pos) * " "))
tokens.append(token)
line_pos += len(token.value) + exp_pos - line_pos
tokens.append(line[-1])
statement.tokens = tokens
def get_widths(self, statement):
indent = self.indent
if isinstance(statement, (ForHeader, End, IfHeader, ElseHeader, ElseIfHeader)):
indent -= 1
if not indent:
return self.widths
return [max(width, indent * 4) for width in self.widths]
def visit_SettingSection(self, node): # noqa
return node
def visit_VariableSection(self, node): # noqa
return node
def visit_KeywordSection(self, node): # noqa
return node
def visit_CommentSection(self, node): # noqa
return node
class ColumnWidthCounter(ModelVisitor):
def __init__(self, formatting_config):
self.widths = []
self.formatting_config = formatting_config
self.test_name_lineno = -1
self.any_one_line_test = False
self.header_with_cols = False
def visit_TestCaseSection(self, node): # noqa
self.generic_visit(node)
if not self.header_with_cols and not self.any_one_line_test and self.widths:
self.widths[0] = 0
self.widths = [round_to_four(length) for length in self.widths]
@check_start_end_line
def visit_Statement(self, statement): # noqa
if statement.type == Token.COMMENT:
return
if statement.type == Token.TESTCASE_HEADER:
if len(statement.data_tokens) > 1:
self.header_with_cols = True
self._count_widths_from_statement(statement)
elif statement.type == Token.TESTCASE_NAME:
if self.widths:
self.widths[0] = max(self.widths[0], len(statement.name))
else:
self.widths.append(len(statement.name))
self.test_name_lineno = statement.lineno
else:
if self.test_name_lineno == statement.lineno:
self.any_one_line_test = True
if not isinstance(statement, (ForHeader, IfHeader, ElseHeader, ElseIfHeader, End)):
self._count_widths_from_statement(statement, indent=1)
def _count_widths_from_statement(self, statement, indent=0):
for line in statement.lines:
line = [t for t in line if t.type not in (Token.SEPARATOR, Token.EOL)]
for index, token in enumerate(line, start=indent):
if index < len(self.widths):
self.widths[index] = max(self.widths[index], len(token.value))
else:
self.widths.append(len(token.value))
```
#### File: robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
```python
from robot.api.parsing import ModelTransformer, get_model, ModelVisitor, Token
import os, sys
keywordlist = []
other_keywords = []
used_keywords = []
class ext_ExtraIndentForKeywordArguments(ModelTransformer):
def __init__(self):
self.cont = 0
def visit_File(self, node):
# Get keywords in python libraries
for path in sys.path:
if 'site-packages' in path:
goodpath = path
for path, subdirs, files in os.walk(goodpath.replace('\\', '\\\\')):
for name in files:
if '.py' in name and '.pyc' not in name and '_init_' not in name and ('robot' in path or 'wslw' in path or 'gurux' in path):
# print(os.path.join(path, name))
with open(os.path.join(path, name), 'r', errors='ignore') as f:
for line in f.readlines():
if 'def' == line.lstrip()[0:3] and '__init__' not in line:
# print(line.split('def')[1].split('(')[0].lstrip().rstrip())
other_keywords.append(line.split('def')[1].split('(')[0].lstrip().rstrip().lower().replace('_', ' '))
# Get keywords in resource files
for path, subdirs, files in os.walk(os.getcwd().replace('in_dev', 'keywords').replace('\\', '\\\\')):
for name in files:
if('.robot' in name):
# print(os.path.join(path, name))
model = get_model(os.path.join(path, name))
printer = TestNamePrinter()
printer.visit(model)
# Get keywords in the Keywords section
model = get_model(node.source)
printer = TestNamePrinter()
printer.visit(model)
# Get keywords used in the test
model = get_model(node.source)
printer = KeywordsNamePrinter()
printer.visit(model)
self.generic_visit(node)
def visit_KeywordCall(self, node):
keywords_name = [sec[0].value for sec in used_keywords]
for token in node.data_tokens:
for i, sec in enumerate(used_keywords[:-1]):
if token.lineno >= sec[1] and token.lineno < used_keywords[i + 1][1]:
# print(repr(token) + ' va con seccion: ' + sec[0].value + ' y indent_level: ' + str(sec[3]))
if token.type == Token.ARGUMENT and token.value in keywords_name:
token.value = ' ' * 4*(sec[3] - 1) + token.value
elif token.type == Token.ARGUMENT and token.value not in keywords_name:
token.value = ' ' * 4*(sec[3]) + token.value
return node
class TestNamePrinter(ModelVisitor):
def visit_KeywordName(self, node):
# print(node.name)
keywordlist.append(node.name.lower())
class KeywordsNamePrinter(ModelVisitor):
def visit_KeywordCall(self, node):
for token in node.data_tokens:
if((token.value.lower() in keywordlist or token.value.lower() in other_keywords) and token.type == Token.KEYWORD):
used_keywords.append([token, token.lineno, True, 0])
# print(repr(token) + ' ES KEYWORD RECONOCIDA')
elif((token.value.lower() in keywordlist or token.value.lower() in other_keywords) and token.type == Token.ARGUMENT):
extra_indent_level = used_keywords[-1][3] + 1
used_keywords.append([token, token.lineno, False, extra_indent_level])
# print(repr(token) + ' ES KEYWORD NO RECONOCIDA' + ' extra_indent_level: ' + str(used_keywords[-1][3]))
```
#### File: transformers/AlignSettingsSection/test_transformer.py
```python
import pytest
from .. import run_tidy_and_compare
class TestAlignSettingsSection:
TRANSFORMER_NAME = "AlignSettingsSection"
def test_align(self):
run_tidy_and_compare(self.TRANSFORMER_NAME, source="test.robot")
def test_align_all_columns(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME, source="test.robot", expected="all_columns.robot", config=":up_to_column=0"
)
def test_align_three_columns(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME, source="test.robot", expected="three_columns.robot", config=":up_to_column=3"
)
def test_align_selected_whole(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="test.robot",
expected="selected_whole.robot",
config=" --startline 1 --endline 25",
)
def test_align_selected_part(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="test.robot",
expected="selected_part.robot",
config=" --startline 9 --endline 14",
)
def test_empty_lines_inside_statement(self):
# bug from #75
run_tidy_and_compare(self.TRANSFORMER_NAME, source="empty_lines.robot")
def test_continued_statement_style(self):
run_tidy_and_compare(self.TRANSFORMER_NAME, source="multiline_keywords.robot")
def test_continued_statement_style_all_columns(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="multiline_keywords.robot",
expected="multiline_keywords_all_col.robot",
config=":up_to_column=3",
)
@pytest.mark.parametrize("indent", (0, 2, 20))
def test_continued_statement_style_all_columns_configure_indent(self, indent):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="multiline_keywords.robot",
expected=f"multiline_keywords_{indent}indent.robot",
config=f":up_to_column=3:argument_indent={indent}",
)
def test_multiline_with_blank_line(self):
run_tidy_and_compare(self.TRANSFORMER_NAME, source="blank_line_doc.robot")
def test_doc_multiline_and_whitespace(self):
run_tidy_and_compare(self.TRANSFORMER_NAME, source="blank_line_and_whitespace.robot")
```
#### File: transformers/NormalizeAssignments/test_transformer.py
```python
import pytest
from .. import run_tidy_and_compare, run_tidy
class TestNormalizeAssignments:
TRANSFORMER_NAME = "NormalizeAssignments"
@pytest.mark.parametrize(
"filename", ["common_remove.robot", "common_equal_sign.robot", "common_space_and_equal_sign.robot"]
)
def test_autodetect(self, filename):
run_tidy_and_compare(self.TRANSFORMER_NAME, source=filename)
@pytest.mark.parametrize("filename", ["common_remove", "common_equal_sign", "common_space_and_equal_sign"])
def test_autodetect_variables(self, filename):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source=filename + ".robot",
expected=filename + "_variables.robot",
config=":equal_sign_type_variables=autodetect",
)
def test_remove(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME, source="tests.robot", expected="remove.robot", config=":equal_sign_type=remove"
)
def test_add_equal_sign(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="tests.robot",
expected="equal_sign.robot",
config=":equal_sign_type=equal_sign",
)
def test_add_space_and_equal_sign(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="tests.robot",
expected="space_and_equal_sign.robot",
config=":equal_sign_type=space_and_equal_sign",
)
@pytest.mark.parametrize("param_name", ["equal_sign_type", "equal_sign_type_variables"])
def test_invalid_equal_sign_type(self, param_name):
result = run_tidy(
self.TRANSFORMER_NAME,
args=f"--transform {self.TRANSFORMER_NAME}:{param_name}==".split(),
source="tests.robot",
exit_code=1,
)
expected_output = (
f"Importing 'robotidy.transformers.{self.TRANSFORMER_NAME}' failed: "
"Creating instance failed: BadOptionUsage: Invalid configurable value: = "
f"for {param_name} for AssignmentNormalizer transformer. "
"Possible values:\n remove\n equal_sign\n space_and_equal_sign"
)
assert expected_output in str(result.exception)
```
#### File: transformers/RenameKeywords/test_transformer.py
```python
from .. import run_tidy_and_compare, run_tidy
class TestRenameKeywords:
TRANSFORMER_NAME = "RenameKeywords"
def test_transformer(self):
run_tidy_and_compare(self.TRANSFORMER_NAME, source="test.robot", expected="test.robot")
def test_renaming_pattern(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="test.robot",
expected="rename_pattern_partial.robot",
config=r":replace_pattern=(?i)rename\s?me:replace_to=New_Shining_Name",
)
def test_renaming_whole_name_pattern(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="test.robot",
expected="rename_pattern_whole.robot",
config=r":replace_pattern=(?i)^rename\s?me$:replace_to=New_Shining_Name",
)
def test_keep_underscores(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="test.robot",
expected="with_underscores.robot",
config=r":remove_underscores=False",
)
def test_invalid_pattern(self):
result = run_tidy(
self.TRANSFORMER_NAME,
args=rf"--transform {self.TRANSFORMER_NAME}:replace_pattern=[\911]".split(),
source="test.robot",
exit_code=1,
)
expected_output = (
f"Importing 'robotidy.transformers.{self.TRANSFORMER_NAME}' failed: "
"Creating instance failed: BadOptionUsage: Invalid configurable value: "
rf"'[\911]' for replace_pattern in {self.TRANSFORMER_NAME} transformer. "
"It should be a valid regex expression. Regex error: 'bad escape \\9'"
)
assert expected_output in str(result.exception)
def test_with_library_name(self):
run_tidy_and_compare(self.TRANSFORMER_NAME, source="with_library_name.robot")
```
#### File: transformers/SplitTooLongLine/test_transformer.py
```python
from .. import run_tidy_and_compare
class TestSplitTooLongLine:
TRANSFORMER_NAME = "SplitTooLongLine"
def test_split_too_long_lines(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="tests.robot",
expected="feed_until_line_length.robot",
config=":line_length=80:split_on_every_arg=False -s 4",
)
def test_split_too_long_lines_split_on_every_arg(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="tests.robot",
expected="split_on_every_arg.robot",
config=":line_length=80:split_on_every_arg=True -s 4",
)
def test_split_lines_with_multiple_assignments(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="multiple_assignments.robot",
expected="multiple_assignments_until_line_length.robot",
config=":line_length=80:split_on_every_arg=False -s 4",
)
def test_split_lines_with_multiple_assignments_on_every_arg(self):
run_tidy_and_compare(
self.TRANSFORMER_NAME,
source="multiple_assignments.robot",
expected="multiple_assignments_on_every_arg.robot",
config=":line_length=80:split_on_every_arg=True -s 4",
)
```
#### File: tests/utest/test_utils.py
```python
import os
from pathlib import Path
import pytest
from robotidy.app import Robotidy
from robotidy.utils import decorate_diff_with_color, split_args_from_name_or_path, GlobalFormattingConfig
@pytest.fixture
def app():
formatting_config = GlobalFormattingConfig(
space_count=4,
line_sep="auto",
start_line=None,
separator="space",
end_line=None,
)
return Robotidy(
transformers=[],
transformers_config=[],
src=(".",),
exclude=None,
extend_exclude=None,
overwrite=False,
show_diff=False,
formatting_config=formatting_config,
verbose=False,
check=False,
output=None,
force_order=False,
)
class TestUtils:
def test_not_changed_lines_not_colorized(self):
lines = ["this is one line\n", "and another\n"]
output = decorate_diff_with_color(lines)
assert output == "".join(lines)
def test_diff_lines_colorized(self):
lines = [
"+++ color category\n",
"--- color category\n",
"+ new line\n",
"- removed line\n",
"@@ line numbers\n",
"no diff line\n",
"signs + in the - middle @@ +++ ---\n",
]
expected_lines = [
"\033[1m+++ color category\n\033[0m",
"\033[1m--- color category\n\033[0m",
"\033[32m+ new line\n\033[0m",
"\033[31m- removed line\n\033[0m",
"\033[36m@@ line numbers\n\033[0m",
"no diff line\n",
"signs + in the - middle @@ +++ ---\n",
]
output = decorate_diff_with_color(lines)
assert output == "".join(expected_lines)
@pytest.mark.parametrize(
"name_or_path, expected_name, expected_args",
[
("DiscardEmptySections", "DiscardEmptySections", []),
("DiscardEmptySections:allow_only_comments=True", "DiscardEmptySections", ["allow_only_comments=True"]),
("DiscardEmptySections;allow_only_comments=True", "DiscardEmptySections", ["allow_only_comments=True"]),
(
"DiscardEmptySections;allow_only_comments=True:my_var=1",
"DiscardEmptySections",
["allow_only_comments=True:my_var=1"],
),
(r"C:\path\to\module\transformer:my_variable=1", r"C:\path\to\module\transformer", ["my_variable=1"]),
(__file__, __file__, []),
],
)
def test_split_args_from_name_or_path(self, name_or_path, expected_name, expected_args):
name, args = split_args_from_name_or_path(name_or_path)
assert name == expected_name
assert args == expected_args
@pytest.mark.parametrize(
"line_sep, source_file, expected",
[
("auto", "lf.robot", "\n"),
("auto", "crlf.robot", "\r\n"),
("auto", "cr.robot", "\r"),
("auto", "crlf_mixed.robot", "\n"),
("auto", "empty.robot", os.linesep),
("native", "lf.robot", os.linesep),
("native", "crlf.robot", os.linesep),
("windows", "lf.robot", "\r\n"),
("windows", "crlf.robot", "\r\n"),
("unix", "lf.robot", "\n"),
("unix", "crlf.robot", "\n"),
],
)
def test_get_line_ending(self, line_sep, source_file, expected, app):
source = str(Path(__file__).parent / "testdata" / "auto_line_sep" / source_file)
app.formatting_config = GlobalFormattingConfig(
space_count=4,
line_sep=line_sep,
start_line=None,
separator="space",
end_line=None,
)
assert app.get_line_ending(source) == expected
``` |
{
"source": "josgard94/CaesarCipher",
"score": 5
} |
#### File: josgard94/CaesarCipher/CesarEncryption.py
```python
import string
class Encryption:
def EncrypText(self, PlaneText, n):
EncrypT = "";
EncrypTxt = self.TextToNumber(PlaneText)
i = 0;
while i < len(EncrypTxt):
aux = EncrypTxt[i]
try:
x = int(EncrypTxt[i])
                if 0 <= x <= 60:
E_n = ((x - n) % 61)
#print(E_n)
letter = self.NumberToText(E_n)
EncrypT += letter
i += 1;
except ValueError:
#i += 1;
EncrypT += aux
i += 1;
return EncrypT
def DecrypText(self, EncrypTxt, n):
Text = ""
StringNumber = self.TextToNumber(EncrypTxt)
i = 0;
while i < len(StringNumber):
aux = StringNumber[i]
try:
x = int(StringNumber[i])
if x >= 0 or x <= 60:
D_n = ((x + n) % 61)
letter = self.NumberToText(D_n)
Text += letter
i += 1;
except ValueError:
#i += 1;
Text += aux
i += 1;
return Text
def NumberToText(self,Number):
letter = 'abcdefghijklmnopqrstuvwxyzABCDFGH<KEY>'
return letter[Number]
def TextToNumber(self,Text):
NumberString = []
letter = 'abcdefghijklmnopqrstuvwxyzABCDFGHIJKLMN<KEY>'
i = 0
for c in Text:
#c = Text[i]
if c in letter:
#NumberString[i] = str(letter.index(c))
NumberString.append(str(letter.index(c)))
else:
NumberString.append(c);
i += 1;
return NumberString
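# Usage sketch: with shift n, EncrypText subtracts n modulo 61 over the
# 61-symbol alphabet and DecrypText adds it back, so the round trip returns the
# original text. (The alphabet strings are partially elided in this copy, so
# run this against the complete source.)
#
#   cipher = Encryption()
#   secret = cipher.EncrypText("Hello World", 5)
#   assert cipher.DecrypText(secret, 5) == "Hello World"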
``` |
{
"source": "Josh0105/practicaPresencial",
"score": 4
} |
#### File: Josh0105/practicaPresencial/calculadora.py
```python
import math
class calculadora:
def __init__(self):
self.historial = []
def suma(self,a,b):
self.historial.append(str(a)+" + "+str(b)+" = "+str(a+b))
return a+b
def resta(self,a,b):
self.historial.append(str(a)+" - "+str(b)+" = "+str(a-b))
return a-b
def multiplicar(self,a,b):
self.historial.append(str(a)+" * "+str(b)+" = "+str(a*b))
return a*b
def dividir(self,a,b):
try:
self.historial.append(str(a)+" / "+str(b)+" = "+str(a/b))
return a/b
except:
self.historial.append("Division no definida")
return False
def potencia(self,a,b):
try:
self.historial.append(str(a)+" ^ "+str(b)+" = "+str(pow(a,b)))
return pow(a,b)
except:
self.historial.append("Potencia no definida")
False
def raiz(self,a,b):
try:
self.historial.append(str(b)+" ^ ( 1 / "+str(a)+" ) = "+str(pow(b,(1/a))))
return pow(b,(1/a))
except:
self.historial.append("Raiz no definida")
return False
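# Usage sketch (illustrative values), guarded so importing the class is unaffected:
if __name__ == "__main__":
    calc = calculadora()
    print(calc.suma(2, 3))       # 5
    print(calc.dividir(1, 0))    # False; logs "Division no definida" in historial
    print(calc.potencia(2, 10))  # 1024
    for entrada in calc.historial:
        print(entrada)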
``` |
{
"source": "Josh0105/proyecto2BackendPython",
"score": 3
} |
#### File: Josh0105/proyecto2BackendPython/app.py
```python
from flask import Flask, request, jsonify
from flask_cors import CORS
from Usuario import Usuario
from CRUD_Usuarios import CRUD_Usuarios
from CRUD_VideoJuego import CRUD_VidoJuego
#CREAMOS EL CRUD DE USUARIOS Y DE VIDEO JUEGOS
var_Usuarios = CRUD_Usuarios()
var_Juegos = CRUD_VidoJuego()
app = Flask(__name__)
CORS(app)
#ENDPOINT DE LOGIN
@app.route('/login', methods=['POST'])
def login():
if request.method == 'POST':
response = {}
nombre = request.form.get('nombre_usuario')
passw = request.form.get('passw_usuario')
usuario = var_Usuarios.autenticar_Usuario(nombre,passw)
if usuario is not False:
response['id'] = usuario.id
response['usuario'] = usuario.nombre
response['estado'] = 1
response['admin'] = usuario.admin
return response
response['estado'] = 0
return response
#ENDPOINT DE AGREGAR COMENTARIO
@app.route('/agregar-comentario', methods = ["POST"])
def agregarComent():
if request.method == "POST":
response = {}
idUser = int(request.form.get('id_usuario'))
nombreUsuario = var_Usuarios.devolver_nombre_usuario(idUser)
idJuego = int(request.form.get('id_juego'))
comentario = request.form.get('comentario')
fecha = request.form.get('fecha')
estadoAgregado = var_Juegos.nuevoComentario(comentario,nombreUsuario,fecha,idJuego)
response['comentario_agregado'] = estadoAgregado
return response
#ENDPOINT DE AGREGAR JUEGO A BIBLIOTECA
@app.route('/agregar-a-biblioteca', methods = ["POST"])
def agregarABiblioteca():
if request.method == "POST":
response = {}
idUser = int(request.form.get('id_usuario'))
idJuego = int(request.form.get('id_juego'))
estadoAgregado = var_Usuarios.agregarABiblioteca(idUser,idJuego)
response['estado_agregado'] = estadoAgregado
return response
#ENDPOINT DE MODIFICAR USUARIO
@app.route('/modificar-usuario', methods =["POST"])
def modificarUsuario():
if request.method == 'POST':
response = {}
id = int(request.form.get('id_usuario'))
nombre = request.form.get('nombre')
apellido = request.form.get('apellido')
userName = request.form.get('user_name')
contrasena = request.form.get('contrasena')
contrasena2 = request.form.get('contrasena2')
estadoCreacion = var_Usuarios.modificar_Usuario(id,nombre,apellido,userName,contrasena,contrasena2)
response['estado_creacion'] = estadoCreacion
return response
#ENDPOINT DE MODIFICAR LOS DATOS DE UN JUEGO
@app.route('/modificar-juego', methods =["POST"])
def modificarJuego():
if request.method == 'POST':
response = {}
id = int(request.form.get('id_juego'))
nombreJuego = request.form.get('nombre_juego')
anio = request.form.get('anio')
precio = request.form.get('precio')
cat1 = request.form.get('categoria_1')
cat2 = request.form.get('categoria_2')
cat3 = request.form.get('categoria_3')
foto = request.form.get('foto')
banner = request.form.get('banner')
descripcion = request.form.get('descripcion')
estadoCreacion = var_Juegos.modificarJuego(id,nombreJuego,anio,precio,cat1,cat2,cat3,foto,banner,descripcion)
response['estado_creacion'] = estadoCreacion
return response
#ENDPOINT PARA CREACION DE UN JUEGO NUEVO
@app.route('/nuevo-juego', methods =["POST"])
def nuevoJuego():
if request.method == 'POST':
response = {}
nombreJuego = request.form.get('nombre_juego')
anio = request.form.get('anio')
precio = request.form.get('precio')
cat1 = request.form.get('categoria_1')
cat2 = request.form.get('categoria_2')
cat3 = request.form.get('categoria_3')
foto = request.form.get('foto')
banner = request.form.get('banner')
descripcion = request.form.get('descripcion')
estadoCreacion = var_Juegos.crearJuego(nombreJuego,anio,precio,cat1,cat2,cat3,foto,banner,descripcion)
response['estado_creacion'] = estadoCreacion
return response
#ENDPOINT PARA REGISTRO DE UN USUARIO CLIENTE
@app.route('/registro', methods =["POST"])
def registro():
if request.method == 'POST':
response = {}
nombre = request.form.get('nombre')
apellido = request.form.get('apellido')
userName = request.form.get('user_name')
contrasena = request.form.get('contrasena')
contrasena2 = request.form.get('contrasena2')
estadoCreacion = var_Usuarios.crear_Usuario(nombre,apellido,userName,contrasena,contrasena2,False)
response['estado_creacion'] = estadoCreacion
return response
#ENDPOINT PARA REGISTRO DE UN USUARIO ADMINISTRADOR
@app.route('/registro-admin', methods =["POST"])
def registroAdmin():
if request.method == 'POST':
response = {}
nombre = request.form.get('nombre')
apellido = request.form.get('apellido')
userName = request.form.get('user_name')
contrasena = request.form.get('contrasena')
contrasena2 = request.form.get('contrasena2')
estadoCreacion = var_Usuarios.crear_Usuario(nombre,apellido,userName,contrasena,contrasena2,True)
response['estado_creacion'] = estadoCreacion
return response
#ENDPOINT PARA ELIMINAR UN JUEGO DE LA LISTA
@app.route('/eliminar-juego', methods =["POST"])
def eliminar_Juego():
if request.method == 'POST':
response = {}
id = int(request.form.get('id_juego'))
estadoCreacion = var_Juegos.eliminarJuego(id)
response['estado_creacion'] = estadoCreacion
return response
#ENDPOINT PARA BUSCAR UN JUEGO POR CATEGORIA
@app.route('/buscar',methods =["POST"])
def buscar():
if request.method == 'POST':
response = {}
valor = request.form.get('valor')
resultadoBusqueda = var_Juegos.buscar_Juegos(valor)
response['resultados'] = resultadoBusqueda
return response
#ENDPOINT PARA OBTENER LOS RESULTADOS DE UNA BUSQUEDA POR CATEGORIA
@app.route('/obtener-resultado-busqueda')
def obtenerResultadoBusqueda():
return var_Juegos.devolver_Juegos_Lista(var_Juegos.resultadoBusqueda)
#ENDPOINT PARA OBTENER TODOS LOS DATOS DE UN USUARIO
@app.route('/obtener-datos-usuario')
def obtenerDatosUser():
id = int(request.args.get('id',None))
print("se obtubo id")
return var_Usuarios.devolver_datos_usuario(id)
#ENDPOINT PARA RECUPERAR CONTRASEÑA
@app.route('/recuperar',methods =["POST"])
def recuperar():
if request.method == 'POST':
response = {}
userName = request.form.get('user_name')
usuarioRecuperar = var_Usuarios.recuperar_Contrasena(userName)
if usuarioRecuperar is not False:
response['contrasena'] = usuarioRecuperar.contrasena
response['estado'] = 1
return response
response['estado'] = 0
return response
#ENDPOINT PARA OBTENER LOS DATOS DE TODOS LOS JUEGOS
@app.route('/obtener-todos-juegos')
def obtenerTododosJuegos():
return var_Juegos.devolver_Juegos()
#ENDPOINT PARA OBTENER LOS DATOS DE TODOS LOS USUARIOS
@app.route('/obtener-todos-usuarios')
def obtenerTododosUsuarios():
return var_Usuarios.devolver_Usuarios()
#ENDPOINT PARA OBTENER LOS JUEGOS DE LA BIBLIOTECA DE UN USUARIO
@app.route('/obtener-mi-biblioteca')
def obtenerMiBiblioteca():
id = int(request.args.get('id',None))
print("se obtubo id")
return var_Juegos.devolver_Juegos_Lista(var_Usuarios.misUsuarios[id].biblioteca)
#ENDPOINT PARA OBTENER TODOS LOS COMENTARIOS DE UN JUEGO
@app.route('/obtener-todos-comentarios')
def obtenerTododosComentarios():
id = int(request.args.get('id',None))
print("se obtubo id")
coments = var_Juegos.devolver_ComentariosJuego(id)
if coments is not False:
return coments
#ENDPOINT PARA OBTENER LOS DATOS DE UN JUEGO
@app.route('/obtener-juego')
def obtener_juego():
id = int(request.args.get('id',None))
print("se mandó id")
game = var_Juegos.devolver_Juego(id)
if game is not None:
return{
'estado': 1,
'data' : game
}
#ENDPOINT DEFAULT
@app.route("/")
def index():
return "<h1>Bienvenido</h1>"
if __name__ == "__main__":
app.run(threaded=True, port=5000, debug=True)
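# Example request against the /login endpoint (form-encoded; admin/admin is the
# master user seeded in CRUD_Usuarios). A successful response carries estado=1
# plus the user's id, name and admin flag:
#
#   curl -X POST http://localhost:5000/login \
#        -d "nombre_usuario=admin" -d "passw_usuario=admin"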
```
#### File: Josh0105/proyecto2BackendPython/CRUD_Usuarios.py
```python
from Usuario import Usuario
import json
#DEFINIMOS LA CLASE CRUD_USUARIOS
class CRUD_Usuarios:
#constructor del CRUD usuarios
def __init__(self):
self.misUsuarios = []
self.contador = 0
        # Here the master user is created
self.misUsuarios.append(Usuario(0,"Usuario","Maestro","admin","admin",True))
    # Method for creating client users
    # Return codes: 1: created, 2: userName does not start with a letter, 3: userName is not alphanumeric,
    # 4: userName already exists, 5: passwords do not match, 6: there are empty fields
def crear_Usuario(self,nombre,apellido,userName,contrasena,contrasena2,admin):
#Si existe algún espacio vacío
if nombre== "" or apellido== "" or userName == "" or contrasena =="" or contrasena2 == "":
print("Existen espacion vacios")
return 6
#Si el primer caracter de userName no es letra
if userName[0].isalpha() == False:
print("UserName no empieza con una letra")
return 2
#Si la cadena userName no es alfanumerica
elif userName.isalnum() == False:
print("UserName no contiene solo números o letras")
return 3
#Este for revisa si el userName ya existe
for user in self.misUsuarios:
if user.userName == userName:
print("Este UserName ya existe")
return 4
#Este if evalua si las contraseñas coinciden
if contrasena != contrasena2:
print("Las contraseñas no coinciden")
return 5
#Si todo está correcto aumentamos el contador y creamos el usuario
self.contador += 1
self.misUsuarios.append(Usuario(self.contador,nombre,apellido,userName,contrasena,admin))
print("se creó un usuario con exito")
return 1
#recive un ID y devuelve el nombre del usuario
def devolver_nombre_usuario(self,id):
for user in self.misUsuarios:
if user.id == id:
return user.userName
return ""
#devuelve un usuario en formato json
def devolver_datos_usuario(self,id):
for user in self.misUsuarios:
if user.id == id:
return user.dump()
return False
#devuelve todos los usuarios en formato json
def devolver_Usuarios(self):
return json.dumps([user.dump() for user in self.misUsuarios])
#devuelve el usuario si el user y la contraseña son correctos
def autenticar_Usuario(self,userName,contrasena):
for user in self.misUsuarios:
if user.autenticar(userName,contrasena) == True:
print("usuario y contraseña correcta")
return user
return False
#devuelve la contraseña del usuario si existe el usuario
def recuperar_Contrasena(self,userName):
for user in self.misUsuarios:
if user.userName == userName:
print("la contraseña es: " + user.contrasena)
return user
return False
    # Method for modifying a user
    # Return codes: 1: modified, 2: userName does not start with a letter, 3: userName is not alphanumeric,
    # 4: userName already exists, 5: passwords do not match, 6: empty fields
def modificar_Usuario(self,id,nombre,apellido,userName,contrasena,contrasena2):
#Si existe algún espacio vacío
if nombre== "" or apellido== "" or userName == "" or contrasena =="" or contrasena2 == "":
print("Existen espacion vacios")
return 6
#Si el primer caracter de userName no es letra
if userName[0].isalpha() == False:
print("UserName no empieza con una letra")
return 2
#Si la cadena userName no es alfanumerica
elif userName.isalnum() == False:
print("UserName no contiene solo números o letras")
return 3
#Este for revisa si el userName ya existe en otros id diferentes de este
for user in self.misUsuarios:
if user.id != id:
if user.userName == userName:
print("El nuevo UserName ya existe en otro usuario")
return 4
#Este if evalua si las contraseñas coinciden
if contrasena != contrasena2:
print("Las contraseñas no coinciden")
return 5
#Si todo está correcto modificamos el usuario
if self.misUsuarios[id].admin == True:
self.misUsuarios[id].modificarDatos(id,nombre,apellido,userName,contrasena,True)
print("se modificó un usuario administrador con exito")
return 1
else:
self.misUsuarios[id].modificarDatos(id,nombre,apellido,userName,contrasena,False)
print("se modificó un usuario normal con exito")
return 1
    # Adds a game to the user's library: returns 1 if added,
    # 0 if it was already there, and 2 if the user does not exist
def agregarABiblioteca(self,idUsuario,idJuego):
for user in self.misUsuarios:
if user.id == idUsuario:
if(user.agregarIDJuego(idJuego)==True):
return 1
else:
return 0
print("usuario no existe")
return 2
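# Usage sketch: the CRUD keeps users in memory only and seeds id 0 with the
# admin/admin master account. The library call at the end assumes the Usuario
# class (not shown here) implements agregarIDJuego as used above.
if __name__ == "__main__":
    crud = CRUD_Usuarios()
    print(crud.crear_Usuario("Ana", "Lopez", "ana01", "secret", "secret", False))  # 1 = created
    user = crud.autenticar_Usuario("ana01", "secret")
    if user:
        print(user.userName)
        print(crud.agregarABiblioteca(user.id, 7))  # 1 first time, 0 if already in the library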
``` |
{
"source": "Josh0105/SESION9",
"score": 3
} |
#### File: Josh0105/SESION9/Pokemon.py
```python
class Pokemon:
    # Constructor; roughly the equivalent of: public Pokemon(parameters) {}
def __init__(self,id,nombre,especie,tipo,foto):
self.id = id
self.nombre = nombre
self.especie = especie
self.tipo = tipo
self.foto = foto
def imprimir_tipo(self):
print(self.nombre + ' es de tipo ' + self.tipo)
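# Usage sketch:
if __name__ == "__main__":
    pikachu = Pokemon(25, "Pikachu", "Raton", "Electrico", "pikachu.png")
    pikachu.imprimir_tipo()  # prints "Pikachu es de tipo Electrico"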
```
#### File: Josh0105/SESION9/Usuario.py
```python
class Usuario:
def __init__(self, id, usuario, passw):
self.id = id
self.usuario = usuario
        self.passw = passw
def autenticar(self, usuario, passw):
if self.usuario == usuario and self.passw == passw:
print("La autenticación fue correcta")
return True
print ("La autenticación fue incorrecta")
return False
def dump(self):
return {
'id' : self.id,
'nombre' : self.usuario
}
``` |
{
"source": "josh0122/mmsegmentation",
"score": 2
} |
#### File: models/decode_heads/point_head.py
```python
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import point_sample
from mmseg.models.builder import HEADS
from mmseg.ops import resize
from ..losses import accuracy
from .cascade_decode_head import BaseCascadeDecodeHead
def calculate_uncertainty(seg_logits):
"""Estimate uncertainty based on seg logits.
For each location of the prediction ``seg_logits`` we estimate
uncertainty as the difference between top first and top second
predicted logits.
Args:
seg_logits (Tensor): Semantic segmentation logits,
shape (batch_size, num_classes, height, width).
Returns:
        scores (Tensor): Uncertainty scores with the most uncertain
locations having the highest uncertainty score, shape (
batch_size, 1, height, width)
"""
top2_scores = torch.topk(seg_logits, k=2, dim=1)[0]
return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
@HEADS.register_module()
class PointHead(BaseCascadeDecodeHead):
"""A mask point head use in PointRend.
This head is implemented of `PointRend: Image Segmentation as
Rendering <https://arxiv.org/abs/1912.08193>`_.
``PointHead`` use shared multi-layer perceptron (equivalent to
nn.Conv1d) to predict the logit of input points. The fine-grained feature
    and coarse feature will be concatenated together for prediction.
Args:
num_fcs (int): Number of fc layers in the head. Default: 3.
in_channels (int): Number of input channels. Default: 256.
fc_channels (int): Number of fc channels. Default: 256.
num_classes (int): Number of classes for logits. Default: 80.
class_agnostic (bool): Whether use class agnostic classification.
If so, the output channels of logits will be 1. Default: False.
coarse_pred_each_layer (bool): Whether concatenate coarse feature with
the output of each fc layer. Default: True.
conv_cfg (dict|None): Dictionary to construct and config conv layer.
Default: dict(type='Conv1d'))
norm_cfg (dict|None): Dictionary to construct and config norm layer.
Default: None.
loss_point (dict): Dictionary to construct and config loss layer of
point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
loss_weight=1.0).
"""
def __init__(self,
num_fcs=3,
coarse_pred_each_layer=True,
conv_cfg=dict(type='Conv1d'),
norm_cfg=None,
act_cfg=dict(type='ReLU', inplace=False),
**kwargs):
super(PointHead, self).__init__(
input_transform='multiple_select',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc_seg')),
**kwargs)
self.num_fcs = num_fcs
self.coarse_pred_each_layer = coarse_pred_each_layer
fc_in_channels = sum(self.in_channels) + self.num_classes
fc_channels = self.channels
self.fcs = nn.ModuleList()
for k in range(num_fcs):
fc = ConvModule(
fc_in_channels,
fc_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.fcs.append(fc)
fc_in_channels = fc_channels
fc_in_channels += self.num_classes if self.coarse_pred_each_layer \
else 0
self.fc_seg = nn.Conv1d(
fc_in_channels,
self.num_classes,
kernel_size=1,
stride=1,
padding=0)
if self.dropout_ratio > 0:
self.dropout = nn.Dropout(self.dropout_ratio)
delattr(self, 'conv_seg')
def cls_seg(self, feat):
"""Classify each pixel with fc."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.fc_seg(feat)
return output
def forward(self, fine_grained_point_feats, coarse_point_feats):
x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1)
for fc in self.fcs:
x = fc(x)
if self.coarse_pred_each_layer:
x = torch.cat((x, coarse_point_feats), dim=1)
return self.cls_seg(x)
def _get_fine_grained_point_feats(self, x, points):
"""Sample from fine grained features.
Args:
x (list[Tensor]): Feature pyramid from by neck or backbone.
points (Tensor): Point coordinates, shape (batch_size,
num_points, 2).
Returns:
fine_grained_feats (Tensor): Sampled fine grained feature,
shape (batch_size, sum(channels of x), num_points).
"""
fine_grained_feats_list = [
point_sample(_, points, align_corners=self.align_corners)
for _ in x
]
if len(fine_grained_feats_list) > 1:
fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1)
else:
fine_grained_feats = fine_grained_feats_list[0]
return fine_grained_feats
def _get_coarse_point_feats(self, prev_output, points):
"""Sample from fine grained features.
Args:
prev_output (list[Tensor]): Prediction of previous decode head.
points (Tensor): Point coordinates, shape (batch_size,
num_points, 2).
Returns:
coarse_feats (Tensor): Sampled coarse feature, shape (batch_size,
num_classes, num_points).
"""
coarse_feats = point_sample(
prev_output, points, align_corners=self.align_corners)
return coarse_feats
def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg,
train_cfg):
"""Forward function for training.
Args:
inputs (list[Tensor]): List of multi-level img features.
prev_output (Tensor): The output of previous decode head.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
train_cfg (dict): The training config.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self._transform_inputs(inputs)
with torch.no_grad():
points = self.get_points_train(
prev_output, calculate_uncertainty, cfg=train_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, points)
coarse_point_feats = self._get_coarse_point_feats(prev_output, points)
point_logits = self.forward(fine_grained_point_feats,
coarse_point_feats)
point_label = point_sample(
gt_semantic_seg.float(),
points,
mode='nearest',
align_corners=self.align_corners)
point_label = point_label.squeeze(1).long()
losses = self.losses(point_logits, point_label)
return losses
def forward_test(self, inputs, prev_output, img_metas, test_cfg):
"""Forward function for testing.
Args:
inputs (list[Tensor]): List of multi-level img features.
prev_output (Tensor): The output of previous decode head.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
test_cfg (dict): The testing config.
Returns:
Tensor: Output segmentation map.
"""
x = self._transform_inputs(inputs)
refined_seg_logits = prev_output.clone()
for _ in range(test_cfg.subdivision_steps):
refined_seg_logits = resize(
refined_seg_logits,
scale_factor=test_cfg.scale_factor,
mode='bilinear',
align_corners=self.align_corners)
batch_size, channels, height, width = refined_seg_logits.shape
point_indices, points = self.get_points_test(
refined_seg_logits, calculate_uncertainty, cfg=test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, points)
coarse_point_feats = self._get_coarse_point_feats(
prev_output, points)
point_logits = self.forward(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
refined_seg_logits = refined_seg_logits.reshape(
batch_size, channels, height * width)
refined_seg_logits = refined_seg_logits.scatter_(
2, point_indices, point_logits)
refined_seg_logits = refined_seg_logits.view(
batch_size, channels, height, width)
return refined_seg_logits
def losses(self, point_logits, point_label):
"""Compute segmentation loss."""
loss = dict()
if not isinstance(self.loss_decode, nn.ModuleList):
losses_decode = [self.loss_decode]
else:
losses_decode = self.loss_decode
for loss_module in losses_decode:
loss['point' + loss_module.loss_name] = loss_module(
point_logits, point_label, ignore_index=self.ignore_index)
loss['acc_point'] = accuracy(point_logits, point_label)
return loss
def get_points_train(self, seg_logits, uncertainty_func, cfg):
"""Sample points for training.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'uncertainty_func' function that takes point's logit prediction as
input.
Args:
seg_logits (Tensor): Semantic segmentation logits, shape (
batch_size, num_classes, height, width).
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Training config of point head.
Returns:
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains the coordinates of ``num_points`` sampled
points.
"""
num_points = cfg.num_points
oversample_ratio = cfg.oversample_ratio
importance_sample_ratio = cfg.importance_sample_ratio
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = seg_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=seg_logits.device)
point_logits = point_sample(seg_logits, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=seg_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_point_coords = torch.rand(
batch_size, num_random_points, 2, device=seg_logits.device)
point_coords = torch.cat((point_coords, rand_point_coords), dim=1)
return point_coords
def get_points_test(self, seg_logits, uncertainty_func, cfg):
"""Sample points for testing.
Find ``num_points`` most uncertain points from ``uncertainty_map``.
Args:
seg_logits (Tensor): A tensor of shape (batch_size, num_classes,
height, width) for class-specific or class-agnostic prediction.
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Testing config of point head.
Returns:
point_indices (Tensor): A tensor of shape (batch_size, num_points)
that contains indices from [0, height x width) of the most
uncertain points.
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the ``height x width`` grid .
"""
num_points = cfg.subdivision_num_points
uncertainty_map = uncertainty_func(seg_logits)
batch_size, _, height, width = uncertainty_map.shape
h_step = 1.0 / height
w_step = 1.0 / width
uncertainty_map = uncertainty_map.view(batch_size, height * width)
num_points = min(height * width, num_points)
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
point_coords = torch.zeros(
batch_size,
num_points,
2,
dtype=torch.float,
device=seg_logits.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
width).float() * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
width).float() * h_step
return point_indices, point_coords
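# Shape sketch for the helpers above (illustrative values):
#   seg_logits = torch.randn(2, 19, 64, 64)      # (batch, num_classes, H, W)
#   scores = calculate_uncertainty(seg_logits)   # (2, 1, 64, 64); values closest
#                                                # to zero mark the most ambiguous pixels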
```
#### File: models/decode_heads/stdc_head.py
```python
import torch
import torch.nn.functional as F
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class STDCHead(FCNHead):
"""This head is the implementation of `Rethinking BiSeNet For Real-time
Semantic Segmentation <https://arxiv.org/abs/2104.13188>`_.
Args:
boundary_threshold (float): The threshold of calculating boundary.
Default: 0.1.
"""
def __init__(self, boundary_threshold=0.1, **kwargs):
super(STDCHead, self).__init__(**kwargs)
self.boundary_threshold = boundary_threshold
# Using register buffer to make laplacian kernel on the same
# device of `seg_label`.
self.register_buffer(
'laplacian_kernel',
torch.tensor([-1, -1, -1, -1, 8, -1, -1, -1, -1],
dtype=torch.float32,
requires_grad=False).reshape((1, 1, 3, 3)))
self.fusion_kernel = torch.nn.Parameter(
torch.tensor([[6. / 10], [3. / 10], [1. / 10]],
dtype=torch.float32).reshape(1, 3, 1, 1),
requires_grad=False)
def losses(self, seg_logit, seg_label):
"""Compute Detail Aggregation Loss."""
# Note: The paper claims `fusion_kernel` is a trainable 1x1 conv
# parameters. However, it is a constant in original repo and other
# codebase because it would not be added into computation graph
# after threshold operation.
seg_label = seg_label.float()
boundary_targets = F.conv2d(
seg_label, self.laplacian_kernel, padding=1)
boundary_targets = boundary_targets.clamp(min=0)
boundary_targets[boundary_targets > self.boundary_threshold] = 1
boundary_targets[boundary_targets <= self.boundary_threshold] = 0
boundary_targets_x2 = F.conv2d(
seg_label, self.laplacian_kernel, stride=2, padding=1)
boundary_targets_x2 = boundary_targets_x2.clamp(min=0)
boundary_targets_x4 = F.conv2d(
seg_label, self.laplacian_kernel, stride=4, padding=1)
boundary_targets_x4 = boundary_targets_x4.clamp(min=0)
boundary_targets_x4_up = F.interpolate(
boundary_targets_x4, boundary_targets.shape[2:], mode='nearest')
boundary_targets_x2_up = F.interpolate(
boundary_targets_x2, boundary_targets.shape[2:], mode='nearest')
boundary_targets_x2_up[
boundary_targets_x2_up > self.boundary_threshold] = 1
boundary_targets_x2_up[
boundary_targets_x2_up <= self.boundary_threshold] = 0
boundary_targets_x4_up[
boundary_targets_x4_up > self.boundary_threshold] = 1
boundary_targets_x4_up[
boundary_targets_x4_up <= self.boundary_threshold] = 0
boudary_targets_pyramids = torch.stack(
(boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up),
dim=1)
boudary_targets_pyramids = boudary_targets_pyramids.squeeze(2)
boudary_targets_pyramid = F.conv2d(boudary_targets_pyramids,
self.fusion_kernel)
boudary_targets_pyramid[
boudary_targets_pyramid > self.boundary_threshold] = 1
boudary_targets_pyramid[
boudary_targets_pyramid <= self.boundary_threshold] = 0
seg_logit = F.interpolate(
seg_logit,
boundary_targets.shape[2:],
mode='bilinear',
align_corners=True)
loss = super(STDCHead, self).losses(seg_logit,
boudary_targets_pyramid.long())
return loss
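# Note: the ground-truth detail map above is built by extracting boundaries from
# seg_label with a Laplacian kernel at strides 1, 2 and 4, upsampling the coarser
# maps back to full resolution, fusing the three binary maps with the fixed
# weights 6/10, 3/10 and 1/10, and thresholding again; seg_logit is then
# supervised against this fused boundary map.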
``` |
{
"source": "josh0122/PyTorch-Simple-MaskRCNN",
"score": 2
} |
#### File: PyTorch-Simple-MaskRCNN/pytorch_mask_rcnn/utils.py
```python
import os
import re
import random
import torch
__all__ = ["save_ckpt", "Meter"]
def save_ckpt(model, optimizer, epochs, ckpt_path, **kwargs):
checkpoint = {}
checkpoint["model"] = model.state_dict()
checkpoint["optimizer"] = optimizer.state_dict()
checkpoint["epochs"] = epochs
for k, v in kwargs.items():
checkpoint[k] = v
prefix, ext = os.path.splitext(ckpt_path)
ckpt_path = "{}-{}{}".format(prefix, epochs, ext)
torch.save(checkpoint, ckpt_path)
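# Example: save_ckpt(model, optimizer, 10, "maskrcnn.pth", lr=1e-3) writes the
# checkpoint to "maskrcnn-10.pth", with the extra kwarg stored under the key "lr".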
class TextArea:
def __init__(self):
self.buffer = []
def write(self, s):
self.buffer.append(s)
def __str__(self):
return "".join(self.buffer)
def get_AP(self):
result = {"bbox AP": 0.0, "mask AP": 0.0}
txt = str(self)
values = re.findall(r"(\d{3})\n", txt)
if len(values) > 0:
values = [int(v) / 10 for v in values]
result = {"bbox AP": values[0], "mask AP": values[12]}
return result
class Meter:
def __init__(self, name):
self.name = name
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name}:sum={sum:.2f}, avg={avg:.4f}, count={count}"
return fmtstr.format(**self.__dict__)
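# Example:
#   m = Meter("loss"); m.update(0.5); m.update(0.3)
#   str(m)  ->  "loss:sum=0.80, avg=0.4000, count=2"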
``` |
{
"source": "josh0xA/PyFlappyBird",
"score": 3
} |
#### File: PyFlappyBird/core/physics.py
```python
from core.config import *
from core.fgame import *
try:
xrange
except NameError:
xrange = range  # xrange only exists on Python 2; fall back to range on Python 3
def playerObjectShm(playerObjectShm):
util = Utility()
"""oscillates the value of playerObjectShm['val'] between 8 and -8"""
if abs(playerObjectShm['val']) == 8:
playerObjectShm['dir'] *= -1
if playerObjectShm['dir'] == 1:
playerObjectShm['val'] += 1
else:
playerObjectShm['val'] -= 1
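# Example: starting from {'val': 0, 'dir': 1}, repeated calls step val up to 8,
# then the direction flips and val walks back down to -8, approximating the
# simple harmonic motion the function is named after.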
def ScoreViewer(score):
util = Utility()
scr = Screener()
"""displays score in center of screen"""
digScore = [int(x) for x in list(str(score))]
_totalwidth = 0 # total width of all numbers to be printed
for digit in digScore: # iterate through the digits
_totalwidth += FL_IMAGE_BOUNDARY['numbers'][digit].get_width()
arrayOffsetX = (util.FL_GAME_SCREENWIDTH - _totalwidth) / 2
for digit in digScore: # second iteration
scr._KSCREEN.blit(FL_IMAGE_BOUNDARY['numbers'][digit], (arrayOffsetX, util.FL_GAME_SCREENHEIGHT * 0.1))
arrayOffsetX += FL_IMAGE_BOUNDARY['numbers'][digit].get_width()
def RandomPipeGetter():
util = Utility()
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapperY = random.randrange(0, int(FL_BASE_Y * 0.6 - util.FL_PIPE_GAPPER))
gapperY += int(FL_BASE_Y * 0.2)
pipeHeight = FL_IMAGE_BOUNDARY['pipe'][0].get_height()
pipeX = util.FL_GAME_SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapperY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapperY + util.FL_PIPE_GAPPER}, # lower pipe
]
def IsObjectCrashed(playerObject, _pipeUpper, _pipeLower):
util = Utility()
"""returns True if playerObject collders with base or pipes."""
jplayerObjectIndex = playerObject['index']
playerObject['w'] = FL_IMAGE_BOUNDARY['playerObject'][0].get_width()
playerObject['h'] = FL_IMAGE_BOUNDARY['playerObject'][0].get_height()
# if playerObject crashes into ground
if playerObject['y'] + playerObject['h'] >= FL_BASE_Y - 1:
return [True, True]
else:
playerObjectRect = pygame.Rect(playerObject['x'], playerObject['y'],
playerObject['w'], playerObject['h'])
pipeW = FL_IMAGE_BOUNDARY['pipe'][0].get_width()
pipeH = FL_IMAGE_BOUNDARY['pipe'][0].get_height()
for _pipeIterA, _pipeIterB in zip(_pipeUpper, _pipeLower):
# upper and lower pipe rects
uPipeRect = pygame.Rect(_pipeIterA['x'], _pipeIterA['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(_pipeIterB['x'], _pipeIterB['y'], pipeW, pipeH)
# playerObject and upper/lower pipe hitmasks
pHitMask = FL_HITMASK_BOUNDARY['playerObject'][jplayerObjectIndex]
uHitmask = FL_HITMASK_BOUNDARY['pipe'][0]
lHitmask = FL_HITMASK_BOUNDARY['pipe'][1]
# if bird collided with upipe or lpipe
collideA = OnPixelCollisionDetector(playerObjectRect, uPipeRect, pHitMask, uHitmask)
collideB = OnPixelCollisionDetector(playerObjectRect, lPipeRect, pHitMask, lHitmask)
if collideA or collideB:
return [True, False]
return [False, False]
def OnPixelCollisionDetector(_rectangleA, _rectangleB, _hitmaskA, _hitmaskB):
util = Utility()
"""Checks if two objects collide and not just their rects"""
r = _rectangleA.clip(_rectangleB)
if r.width == 0 or r.height == 0:
return False
x1, y1 = r.x - _rectangleA.x, r.y - _rectangleA.y
x2, y2 = r.x - _rectangleB.x, r.y - _rectangleB.y
# on collision...
for x in xrange(r.width):
for y in xrange(r.height):
if _hitmaskA[x1+x][y1+y] and _hitmaskB[x2+x][y2+y]:
return True
return False
def FlappyBirdLoadUp(movementInfo):
util = Utility()
scr = Screener()
global FL_BASE_Y
FL_BASE_Y = util.FL_GAME_SCREENHEIGHT * 0.79
score = playerObjectIndex = mLoopIterator = 0
playerObjectIndexGen = movementInfo['playerObjectIndexGen']
playerObjectx, playerObjecty = int(util.FL_GAME_SCREENWIDTH * 0.2), movementInfo['playerObjecty']
FL_BASE_X = movementInfo['FL_BASE_X']
baseShift = FL_IMAGE_BOUNDARY['base'].get_width() - FL_IMAGE_BOUNDARY['background'].get_width()
# get 2 new pipes to add to _pipeUpper _pipeLower list
newPipe1 = RandomPipeGetter()
newPipe2 = RandomPipeGetter()
# list of upper pipes
_pipeUpper = [
{'x': util.FL_GAME_SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': util.FL_GAME_SCREENWIDTH + 200 + (util.FL_GAME_SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
_pipeLower = [
{'x': util.FL_GAME_SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': util.FL_GAME_SCREENWIDTH + 200 + (util.FL_GAME_SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
# playerObject velocity, max velocity, downward acceleration, acceleration on flap
playerObjectVelY = -9 # playerObject's velocity along Y, default same as playerObjectFlapped
playerObjectMaxVelY = 10 # max vel along Y, max descend speed
playerObjectMinVelY = -8 # min vel along Y, max ascend speed
playerObjectAccY = 1 # playerObject's downward acceleration
playerObjectRot = 45 # playerObject's rotation
playerObjectVelRot = 3 # angular speed
playerObjectRotThr = 20 # rotation threshold
playerObjectFlapAcc = -9 # playerObject's speed on flapping
playerObjectFlapped = False # True when playerObject flaps
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playerObjecty > -2 * FL_IMAGE_BOUNDARY['playerObject'][0].get_height():
playerObjectVelY = playerObjectFlapAcc
playerObjectFlapped = True
FL_SOUND_BOUNDARY['wing'].play()
# check for crash here
_tcrash = IsObjectCrashed({'x': playerObjectx, 'y': playerObjecty, 'index': playerObjectIndex},
_pipeUpper, _pipeLower)
if _tcrash[0]:
return {
'y': playerObjecty,
'groundCrash': _tcrash[1],
'FL_BASE_X': FL_BASE_X,
'_pipeUpper': _pipeUpper,
'_pipeLower': _pipeLower,
'score': score,
'playerObjectVelY': playerObjectVelY,
'playerObjectRot': playerObjectRot
}
# check for score
playerObjectMidPos = playerObjectx + FL_IMAGE_BOUNDARY['playerObject'][0].get_width() / 2
for pipe in _pipeUpper:
pipeMidPos = pipe['x'] + FL_IMAGE_BOUNDARY['pipe'][0].get_width() / 2
if pipeMidPos <= playerObjectMidPos < pipeMidPos + 4:
score += 1
FL_SOUND_BOUNDARY['treyway'].play()
# playerObjectIndex FL_BASE_X change
if (mLoopIterator + 1) % 3 == 0:
playerObjectIndex = next(playerObjectIndexGen)
mLoopIterator = (mLoopIterator + 1) % 30
FL_BASE_X = -((-FL_BASE_X + 100) % baseShift)
# rotate the playerObject
if playerObjectRot > -90:
playerObjectRot -= playerObjectVelRot
# playerObject's movement
if playerObjectVelY < playerObjectMaxVelY and not playerObjectFlapped:
playerObjectVelY += playerObjectAccY
if playerObjectFlapped:
playerObjectFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
playerObjectRot = 45
playerObjectHeight = FL_IMAGE_BOUNDARY['playerObject'][playerObjectIndex].get_height()
playerObjecty += min(playerObjectVelY, FL_BASE_Y - playerObjecty - playerObjectHeight)
# move pipes to left
for _pipeIterA, _pipeIterB in zip(_pipeUpper, _pipeLower):
_pipeIterA['x'] += pipeVelX
_pipeIterB['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < _pipeUpper[0]['x'] < 5:
newPipe = RandomPipeGetter()
_pipeUpper.append(newPipe[0])
_pipeLower.append(newPipe[1])
# remove first pipe if its out of the screen
if _pipeUpper[0]['x'] < -FL_IMAGE_BOUNDARY['pipe'][0].get_width():
_pipeUpper.pop(0)
_pipeLower.pop(0)
# draw game sprites
scr._KSCREEN.blit(FL_IMAGE_BOUNDARY['background'], (0,0))
for _pipeIterA, _pipeIterB in zip(_pipeUpper, _pipeLower):
scr._KSCREEN.blit(FL_IMAGE_BOUNDARY['pipe'][0], (_pipeIterA['x'], _pipeIterA['y']))
scr._KSCREEN.blit(FL_IMAGE_BOUNDARY['pipe'][1], (_pipeIterB['x'], _pipeIterB['y']))
scr._KSCREEN.blit(FL_IMAGE_BOUNDARY['base'], (FL_BASE_X, FL_BASE_Y))
# print score so playerObject overlaps the score
ScoreViewer(score)
# playerObject rotation has a threshold
visibleRot = playerObjectRotThr
if playerObjectRot <= playerObjectRotThr:
visibleRot = playerObjectRot
playerObjectSurface = pygame.transform.rotate(FL_IMAGE_BOUNDARY['playerObject'][playerObjectIndex], visibleRot)
scr._KSCREEN.blit(playerObjectSurface, (playerObjectx, playerObjecty))
pygame.display.update()
scr._KFPSCLOCK.tick(util.FPS_BUFFERING_LIMIT)
``` |
{
"source": "Josh1108/GalaXC",
"score": 2
} |
#### File: Josh1108/GalaXC/network.py
```python
import nmslib
from typing import Callable
import logging
import torch
import numpy as np
import math
from scipy.sparse import csr_matrix, lil_matrix
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.utils.data
import logging
class MeanAggregator(nn.Module):
"""Aggregates a node's embeddings using mean of neighbors' embeddings."""
def __init__(self, features: Callable[[torch.Tensor], torch.Tensor]):
super(MeanAggregator, self).__init__()
self.features = features
def forward(self, neighs: torch.Tensor, node_count: int, device):
neigh_feats = self.features(neighs).to(device)
nb_count = int(neigh_feats.shape[0] / node_count)
fv_by_node = neigh_feats.view(
node_count, nb_count, neigh_feats.shape[-1])
return fv_by_node.mean(1)
class SumAggregator(nn.Module):
"""Aggregates a node's embeddings using mean of neighbors' embeddings."""
def __init__(self, features: Callable[[torch.Tensor], torch.Tensor]):
super(SumAggregator, self).__init__()
self.features = features
def forward(self, neighs: torch.Tensor, node_count: int, device):
neigh_feats = self.features(neighs).to(device)
nb_count = int(neigh_feats.shape[0] / node_count)
fv_by_node = neigh_feats.view(
node_count, nb_count, neigh_feats.shape[-1])
return fv_by_node.sum(1)
class SaintEncoder(nn.Module):
"""Encode a node's using 'convolutional' GraphSaint approach."""
def __init__(
self,
features,
query_func,
device_name,
feature_dim: int,
aggregator: nn.Module,
num_sample: int,
intermediate_dim: int,
embed_dim: int = 300,
activation_fn: callable = F.relu,
base_model=None,
):
super(SaintEncoder, self).__init__()
self.device_name = device_name
if base_model:
self.base_model = base_model
self.features = features
if query_func is None:
self.query_func = self.query_feature
else:
self.query_func = query_func
self.aggregator = aggregator
self.num_sample = num_sample
self.activation_fn = activation_fn
self.weight_1 = nn.Parameter(
torch.FloatTensor(
embed_dim // 2,
intermediate_dim))
self.weight_2 = nn.Parameter(
torch.FloatTensor(
embed_dim // 2,
intermediate_dim))
nn.init.xavier_uniform_(self.weight_1)
nn.init.xavier_uniform_(self.weight_2)
def query(
self,
nodes: np.array,
graph
):
context = {}
neigh_nodes = graph.sample_neighbors(nodes, self.num_sample)[
0
].flatten()
context["node_feats"] = self.query_func(
nodes, graph
)
context["neighbor_feats"] = self.query_func(
neigh_nodes, graph
)
context["node_count"] = len(nodes)
return context
def query_feature(
self,
nodes: np.array,
graph
):
features = graph.node_features(nodes)
return features
def forward(self, context: dict):
"""Generate embeddings for a batch of nodes."""
neigh_feats = self.aggregator.forward(
context["neighbor_feats"], context["node_count"], self.device_name
)
self_feats = self.features(context["node_feats"]).to(self.device_name)
# print (neigh_feats.shape, self_feats.shape)
combined = torch.cat(
[self.weight_1.mm(self_feats.t()), self.weight_2.mm(neigh_feats.t())], dim=0)
combined = self.activation_fn(combined)
return combined
class SageEncoder(nn.Module):
"""Encode a node's using 'convolutional' GraphSage approach."""
def __init__(
self,
features,
query_func,
device_name,
feature_dim: int,
aggregator: nn.Module,
num_sample: int,
intermediate_dim: int,
embed_dim: int = 300,
activation_fn: callable = F.relu,
base_model=None,
):
super(SageEncoder, self).__init__()
self.device_name = device_name
if base_model:
self.base_model = base_model
self.features = features
if query_func is None:
self.query_func = self.query_feature
else:
self.query_func = query_func
self.aggregator = aggregator
self.num_sample = num_sample
self.activation_fn = activation_fn
self.weight = nn.Parameter(
torch.FloatTensor(
embed_dim,
2 * intermediate_dim))
nn.init.xavier_uniform_(self.weight)
def query(
self,
nodes: np.array,
graph,
):
context = {}
print("number of nodes",nodes)
print("num of samples",self.num_sample)
neigh_nodes = graph.sample_neighbors(nodes, self.num_sample)[
0
].flatten()
print("neighboring nodes",neigh_nodes)
context["node_feats"] = self.query_func(
nodes, graph
)
context["neighbor_feats"] = self.query_func(
neigh_nodes, graph
)
context["node_count"] = len(nodes)
return context
def query_feature(
self,
nodes: np.array,
graph,
):
features = graph.node_features(
nodes
)
return features
def forward(self, context: dict):
"""Generate embeddings for a batch of nodes."""
neigh_feats = self.aggregator.forward(
context["neighbor_feats"], context["node_count"], self.device_name
)
self_feats = self.features(context["node_feats"]).to(self.device_name)
combined = torch.cat([self_feats, neigh_feats], dim=1)
combined = self.activation_fn(self.weight.mm(combined.t()))
return combined
class GINEncoder(nn.Module):
"""Encode a node's using 'convolutional' GIN approach."""
def __init__(
self,
features,
query_func,
device_name,
feature_dim: int,
aggregator: nn.Module,
num_sample: int,
intermediate_dim: int,
embed_dim: int = 300,
activation_fn: callable = F.relu,
base_model=None,
):
super(GINEncoder, self).__init__()
self.device_name = device_name
if base_model:
self.base_model = base_model
self.features = features
if query_func is None:
self.query_func = self.query_feature
else:
self.query_func = query_func
self.aggregator = aggregator
self.num_sample = num_sample
self.activation_fn = activation_fn
self.eps = nn.Parameter(torch.rand(1))
def query(
self,
nodes: np.array,
graph
):
context = {}
neigh_nodes = graph.sample_neighbors(nodes, self.num_sample)[
0
].flatten() # We get sampled nodes. Repeat nodes if number of nodes<sample
context["node_feats"] = self.query_func(
nodes, graph
)
context["neighbor_feats"] = self.query_func(
neigh_nodes, graph
)
context["node_count"] = len(nodes)
return context
def query_feature(
self,
nodes: np.array,
graph,
):
features = graph.node_features(
nodes
)
return features
def forward(self, context: dict):
"""Generate embeddings for a batch of nodes."""
neigh_feats = self.aggregator.forward(
context["neighbor_feats"], context["node_count"], self.device_name
)
self_feats = self.features(context["node_feats"]).to(self.device_name)
combined = torch.add(neigh_feats, (1.0 + self.eps) * self_feats)
return combined.t()
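# Note: the (1 + eps) * self + sum-of-neighbours combination above is the GIN
# aggregation step, h_v <- (1 + eps) * h_v + sum_{u in N(v)} h_u, with eps
# learnable; the nonlinear transform that standard GIN applies afterwards is
# handled downstream by the transform/Residual blocks in GalaXCBase.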
class LinearChunk(nn.Module):
"""One part for distributed fully connected layer"""
def __init__(self, input_size, output_size, device_embeddings, bias=True):
super(LinearChunk, self).__init__()
self.device_embeddings = device_embeddings
self.input_size = input_size
self.output_size = output_size
self.weight = Parameter(
torch.Tensor(
self.output_size,
self.input_size))
if bias:
self.bias = Parameter(torch.Tensor(self.output_size, ))
else:
self.register_parameter('bias', None)
self.attention_weights = Parameter(torch.Tensor(self.output_size, 3))
self.sparse = False
self.reset_parameters()
self.act = torch.nn.Softmax(dim=1)
def forward(self, input):
if(input[1] is None):
w = self.weight.unsqueeze(
1) * (self.act(self.attention_weights).unsqueeze(2))
x = input[0].mm(w.view((-1, input[0].shape[-1])).t()
) + self.bias.view(-1)
return x
else:
if len(input[1].shape) == 1:
# 350K X 1 X 300 350K X 3 X 1
# .permute(0, 2, 1).reshape(-1, 900)
w = (self.weight[input[1]].unsqueeze(
1)) * (self.act(self.attention_weights[input[1]])).unsqueeze(2)
x = input[0].mm(w.view((-1, input[0].shape[-1])).t()
) + self.bias[input[1]].view(-1)
return x
elif len(input[1].shape) == 2:
short_weights = F.embedding(input[1].to(self.device_embeddings),
self.weight,
sparse=self.sparse).view(input[1].shape[0] * input[1].shape[1], -1)
short_bias = F.embedding(input[1].to(self.device_embeddings),
self.bias.view(-1, 1),
sparse=self.sparse)
short_att = F.embedding(input[1].to(self.device_embeddings),
self.attention_weights,
sparse=self.sparse).view(input[1].shape[0] * input[1].shape[1], -1)
w = short_weights.unsqueeze(
1) * (self.act(short_att).unsqueeze(2))
x = input[0].unsqueeze(1).repeat(1,
input[1].shape[1],
1) * w.view((input[1].shape[0],
input[1].shape[1],
input[0].shape[-1]))
x = x.sum(axis=2) + short_bias.squeeze()
return x
def move_to_devices(self):
super().to(self.device_embeddings)
def reset_parameters(self):
nn.init.normal_(self.attention_weights)
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
class LinearDistributed(nn.Module):
"""Distributed fully connected layer"""
def __init__(self, input_size, output_size, device_embeddings):
super(LinearDistributed, self).__init__()
self.num_partitions = len(device_embeddings)
self.device_embeddings = device_embeddings
self.input_size = input_size
self.output_size = output_size
self.partition_size = math.ceil(output_size / self.num_partitions)
self.partition_indices = []
for i in range(self.num_partitions):
_start = i * self.partition_size
_end = min(_start + self.partition_size, output_size)
self.partition_indices.append((_start, _end))
print(self.partition_indices)
self.classifiers = nn.ModuleList()
for i in range(len(self.device_embeddings)):
output_size = self.partition_indices[i][1] - \
self.partition_indices[i][0]
self.classifiers.append(
LinearChunk(
input_size,
output_size,
self.device_embeddings[i]))
self.reset_parameters()
def forward(self, input):
if(input[1] is None):
total_x = []
for i in range(len(self.device_embeddings)):
embed = input[0].to(self.device_embeddings[i])
x = self.classifiers[i]((embed, None))
total_x.append(x.to(self.device_embeddings[0]))
total_x = torch.cat(total_x, dim=1)
return total_x
else:
if len(input[1].shape) == 1:
total_x = []
for i in range(len(self.device_embeddings)):
_start = self.partition_indices[i][0]
_end = self.partition_indices[i][1]
embed = input[0].to(self.device_embeddings[i])
indices = input[1][_start: _end]
x = self.classifiers[i]((embed, indices))
total_x.append(x.to(self.device_embeddings[0]))
total_x = torch.cat(total_x, dim=1)
return total_x
elif len(input[1].shape) == 2:
partition_length = input[1].shape[1] // len(
self.partition_indices)
total_x = []
for i in range(len(self.device_embeddings)):
embed = input[0].to(self.device_embeddings[i])
short = input[1][:, i *
partition_length: (i + 1) * partition_length]
x = self.classifiers[i]((embed, short))
total_x.append(x.to(self.device_embeddings[0]))
total_x = torch.cat(total_x, dim=1)
return total_x
def move_to_devices(self):
print("Moving to different devices...")
for i in range(len(self.device_embeddings)):
self.classifiers[i].move_to_devices()
def reset_parameters(self):
for i in range(len(self.device_embeddings)):
self.classifiers[i].reset_parameters()
class Residual(nn.Module):
"""Residual layer implementation"""
def __init__(self, input_size, output_size, dropout, init='eye'):
super(Residual, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.init = init
self.dropout = dropout
self.padding_size = self.output_size - self.input_size
self.hidden_layer = nn.Sequential(nn.Linear(self.input_size,
self.output_size),
nn.BatchNorm1d(self.output_size),
nn.ReLU(),
nn.Dropout(self.dropout))
self.initialize(self.init)
def forward(self, embed):
temp = F.pad(embed, (0, self.padding_size), 'constant', 0)
embed = self.hidden_layer(embed) + temp
return embed
def initialize(self, init_type):
if init_type == 'random':
nn.init.xavier_uniform_(
self.hidden_layer[0].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.constant_(self.hidden_layer[0].bias, 0.0)
else:
print("Using eye to initialize!")
nn.init.eye_(self.hidden_layer[0].weight)
nn.init.constant_(self.hidden_layer[0].bias, 0.0)
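# Note: when output_size > input_size the skip connection zero-pads the input
# (F.pad in forward) so the residual addition is shape-compatible; with the
# default init='eye' the inner linear layer is initialized to the identity.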
class GalaXCBase(nn.Module):
"""Base class for GalaXC"""
def __init__(self, num_labels, hidden_dims, device_names,
feature_dim: int,
fanouts: list,
graph,
embed_dim: int,
dropout=0.5, num_clf_partitions=1, padding_idx=0):
super(GalaXCBase, self).__init__()
# only 1 or 2 hops are allowed.
assert len(fanouts) in [1, 2, 3]
self.graph = graph
self.fanouts = fanouts
self.num_labels = num_labels
self.feature_dim = feature_dim
self.hidden_dims = hidden_dims
self.embed_dim = embed_dim
self.device_names = device_names
self.device_name = self.device_names[0]
self.device_embeddings = torch.device(self.device_name)
self.dropout = dropout
self.padding_idx = padding_idx
self.num_clf_partitions = num_clf_partitions
self._construct_embeddings()
self.transform1 = self._construct_transform()
self.transform2 = self._construct_transform()
self.transform3 = self._construct_transform()
self.classifier = self._construct_classifier()
def query(self, context: dict):
context["encoder"] = self.third_layer_enc.query(
context["inputs"],
self.graph
)
def _construct_transform(self):
return nn.Sequential(nn.ReLU(), nn.Dropout(self.dropout), Residual(
self.embed_dim, self.hidden_dims, self.dropout))
def _construct_classifier(self):
return LinearDistributed(
self.hidden_dims, self.num_labels, self.device_names)
def _construct_embeddings(self):
"""
Some calculation is repeated. Optimizing doesn't help much, keeping for simplicity.
"""
def feature_func(features): return features.squeeze(0)
self.first_layer_enc = GINEncoder(
features=feature_func,
query_func=None,
feature_dim=self.feature_dim,
intermediate_dim=self.feature_dim,
aggregator=SumAggregator(feature_func),
embed_dim=self.embed_dim,
num_sample=self.fanouts[0],
device_name=self.device_name
)
self.second_layer_enc = GINEncoder(
features=lambda context: self.first_layer_enc(context).t(),
query_func=self.first_layer_enc.query,
feature_dim=self.feature_dim,
intermediate_dim=self.embed_dim,
aggregator=SumAggregator(
lambda context: self.first_layer_enc(context).t()
),
embed_dim=self.embed_dim,
num_sample=self.fanouts[1],
base_model=self.first_layer_enc,
device_name=self.device_name
)
self.third_layer_enc = GINEncoder(
features=lambda context: self.second_layer_enc(context).t(),
query_func=self.second_layer_enc.query,
feature_dim=self.feature_dim,
intermediate_dim=self.embed_dim,
aggregator=SumAggregator(
lambda context: self.second_layer_enc(context).t()
),
embed_dim=self.embed_dim,
num_sample=self.fanouts[2],
base_model=self.second_layer_enc,
device_name=self.device_name
)
def encode(self, context):
embed3 = self.third_layer_enc(context["encoder"])
embed2 = self.second_layer_enc(context["encoder"]["node_feats"])
embed1 = self.first_layer_enc(
context["encoder"]["node_feats"]["node_feats"])
embed = torch.cat(
(self.transform1(
embed1.t()), self.transform2(
embed2.t()), self.transform3(
embed3.t())), dim=1)
return embed
def encode_graph_embedding(self, context):
embed = self.embeddings(context["encoder"], self.device_embeddings)
return embed.t()
def forward(self, batch_data, only_head=True):
encoded = self.encode(batch_data)
return self.classifier((encoded, batch_data["label_ids"]))
def initialize_embeddings(self, word_embeddings):
self.embeddings.weight.data.copy_(torch.from_numpy(word_embeddings))
def initialize_classifier(self, clf_weights):
self.classifier.weight.data.copy_(torch.from_numpy(clf_weights[:, -1]))
self.classifier.bias.data.copy_(
torch.from_numpy(clf_weights[:, -1]).view(-1, 1))
def get_clf_weights(self):
return self.classifier.get_weights()
def move_to_devices(self):
self.third_layer_enc.to(self.device_embeddings)
self.transform1.to(self.device_embeddings)
self.transform2.to(self.device_embeddings)
self.transform3.to(self.device_embeddings)
self.classifier.move_to_devices()
@property
def num_trainable_params(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
@property
def model_size(self):
return self.num_trainable_params * 4 / math.pow(2, 20)
class HNSW(object):
"""HNSW ANNS implementation"""
def __init__(self, M, efC, efS, num_threads):
self.index = nmslib.init(method='hnsw', space='cosinesimil')
self.M = M
self.num_threads = num_threads
self.efC = efC
self.efS = efS
def fit(self, data, print_progress=True):
self.index.addDataPointBatch(data)
self.index.createIndex(
{'M': self.M,
'indexThreadQty': self.num_threads,
'efConstruction': self.efC},
print_progress=print_progress
)
def _filter(self, output, num_search):
indices = np.zeros((len(output), num_search), dtype=np.int32)
distances = np.zeros((len(output), num_search), dtype=np.float32)
for idx, item in enumerate(output):
indices[idx] = item[0]
distances[idx] = item[1]
return indices, distances
def predict(self, data, num_search):
self.index.setQueryTimeParams({'efSearch': self.efS})
output = self.index.knnQueryBatch(
data, k=num_search, num_threads=self.num_threads
)
indices, distances = self._filter(output, num_search)
return indices, distances
def save(self, fname):
nmslib.saveIndex(self.index, fname)
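# Example usage (mirroring predict_main.py):
#   anns = HNSW(M=100, efC=300, efS=300, num_threads=24)
#   anns.fit(label_features)                      # label_features: float32 matrix
#   indices, distances = anns.predict(queries, num_search=300)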
```
#### File: Josh1108/GalaXC/predict_main.py
```python
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.utils.data
import numpy as np
import math
import time
import os
import pickle
import random
import nmslib
import sys
from scipy.sparse import csr_matrix, lil_matrix, load_npz, hstack, vstack
from xclib.data import data_utils
from xclib.utils.sparse import normalize
import xclib.evaluation.xc_metrics as xc_metrics
from data import *
from utils import *
from network import HNSW
def predict(net, pred_batch):
"""
head shorty None means predict OvA on head
"""
net.eval()
torch.set_grad_enabled(False)
out_ans = net.forward(pred_batch, False)
out_ans = out_ans.detach().cpu().numpy()
if(pred_batch["label_ids"] is None):
return out_ans, None
return out_ans, pred_batch["label_ids"].detach().cpu().numpy()
def update_predicted(row_indices, predicted_batch_labels,
predicted_labels, remapping, top_k):
batch_size = row_indices.shape[0]
top_values, top_indices = predicted_batch_labels.topk(
k=top_k, dim=1, sorted=False)
ind = np.zeros((top_k * batch_size, 2), dtype=np.int64)
ind[:, 0] = np.repeat(row_indices, [top_k] * batch_size)
if(remapping is not None):
ind[:, 1] = [remapping[x]
for x in top_indices.cpu().numpy().flatten('C')]
else:
ind[:, 1] = [x for x in top_indices.cpu().numpy().flatten('C')]
vals = top_values.cpu().detach().numpy().flatten('C')
predicted_labels[ind[:, 0], ind[:, 1]] = vals
def update_predicted_shortlist(
row_indices, predicted_batch_labels, predicted_labels, shortlist, remapping, top_k=10):
if(len(predicted_batch_labels.shape) == 1):
predicted_batch_labels = predicted_batch_labels[None, :]
m = predicted_batch_labels.shape[0]
top_indices = np.argsort(predicted_batch_labels, axis=1)[
:, ::-1][:, :top_k]
top_values = predicted_batch_labels[np.arange(m)[:, None], top_indices]
batch_size, shortlist_size = shortlist.shape
ind = np.zeros((top_k * batch_size, 2), dtype=np.int64)
ind[:, 0] = np.repeat(row_indices, [top_k] * batch_size)
if(remapping is not None):
ind[:, 1] = [remapping[x]
for x in np.ravel(shortlist[np.arange(m)[:, None], top_indices])]
else:
ind[:, 1] = [x for x in np.ravel(
shortlist[np.arange(m)[:, None], top_indices])]
predicted_labels[ind[:, 0], ind[:, 1]] = np.ravel(top_values)
def run_validation(val_predicted_labels, tst_X_Y_val,
tst_exact_remove, tst_X_Y_trn, inv_prop,dir):
data = []
indptr = [0]
indices = []
for i in range(val_predicted_labels.shape[0]):
_indices1 = val_predicted_labels.indices[val_predicted_labels.indptr[i]: val_predicted_labels.indptr[i + 1]]
_vals1 = val_predicted_labels.data[val_predicted_labels.indptr[i]: val_predicted_labels.indptr[i + 1]]
_indices, _vals = [], []
for _ind, _val in zip(_indices1, _vals1):
if (_ind not in tst_exact_remove[i]) and (
_ind not in tst_X_Y_trn.indices[tst_X_Y_trn.indptr[i]: tst_X_Y_trn.indptr[i + 1]]):
_indices.append(_ind)
_vals.append(_val)
indices += list(_indices)
data += list(_vals)
indptr.append(len(indices))
_pred = csr_matrix(
(data, indices, indptr), shape=(
val_predicted_labels.shape))
print()
# acc = xc_metrics.Metrics(tst_X_Y_val, inv_psp=inv_prop)
# acc = acc.eval(_pred, 5)
recall_lis =[]
prec_lis =[]
for num in [5,10,20,30,50,100]:
_rec = recall(tst_X_Y_val, _pred, num,dir)
recall_lis.append(_rec)
_prec = precision(tst_X_Y_val,_pred,num,dir)
prec_lis.append(_prec)
return recall_lis,prec_lis
def encode_nodes(net, context):
net.eval()
torch.set_grad_enabled(False)
embed3 = net.third_layer_enc(context["encoder"])
embed2 = net.second_layer_enc(context["encoder"]["node_feats"])
embed1 = net.first_layer_enc(
context["encoder"]["node_feats"]["node_feats"])
# embed = torch.stack((net.transform1(embed1.t()), net.transform2(embed2.t()), net.transform3(embed3.t())), dim=1)
embed = torch.stack((embed1.t(), embed2.t(), embed3.t()), dim=1)
embed = torch.mean(embed, dim=1)
return embed
def validate(head_net, params, partition_indices, label_remapping,
label_embs, tst_point_embs, tst_X_Y_val, tst_exact_remove, tst_X_Y_trn, use_graph_embs, topK,dir):
_start = params["num_trn"]
_end = _start + params["num_tst"]
if(use_graph_embs):
label_nodes = [label_remapping[i] for i in range(len(label_remapping))]
val_dataset = DatasetGraphPredictionEncode(label_nodes)
hce = GraphCollator(head_net, params["num_labels"], None, train=0)
encode_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=500,
num_workers=10,
collate_fn=hce,
shuffle=False,
pin_memory=True)
label_embs_graph = np.zeros(
(len(label_nodes), params["hidden_dims"]), dtype=np.float32)
cnt = 0
for batch in encode_loader:
# print (len(label_nodes), cnt*512)
cnt += 1
encoded = encode_nodes(head_net, batch)
encoded = encoded.detach().cpu().numpy()
label_embs_graph[batch["indices"]] = encoded
val_dataset = DatasetGraphPredictionEncode(
[i for i in range(_start, _end)])
hce = GraphCollator(head_net, params["num_labels"], None, train=0)
encode_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=500,
num_workers=10,
collate_fn=hce,
shuffle=False,
pin_memory=True)
tst_point_embs_graph = np.zeros(
(params["num_tst"], params["hidden_dims"]), dtype=np.float32)
for batch in encode_loader:
encoded = encode_nodes(head_net, batch)
encoded = encoded.detach().cpu().numpy()
tst_point_embs_graph[batch["indices"]] = encoded
label_features = label_embs_graph
tst_point_features = tst_point_embs_graph
else:
label_features = label_embs
tst_point_features = tst_point_embs[:params["num_tst"]]
prediction_shortlists = []
BATCH_SIZE = 2000000
t1 = time.time()
for i in range(len(partition_indices)):
print("building ANNS for partition = ", i)
label_NGS = HNSW(
M=100,
efC=300,
efS=params["num_shortlist"],
num_threads=24)
label_NGS.fit(
label_features[partition_indices[i][0]: partition_indices[i][1]])
print("Done in ", time.time() - t1)
t1 = time.time()
tst_label_nbrs = np.zeros(
(tst_point_features.shape[0],
params["num_shortlist"]),
dtype=np.int64)
for i in range(0, tst_point_features.shape[0], BATCH_SIZE):
print(i)
_tst_label_nbrs, _ = label_NGS.predict(
tst_point_features[i: i + BATCH_SIZE], params["num_shortlist"])
tst_label_nbrs[i: i + BATCH_SIZE] = _tst_label_nbrs
prediction_shortlists.append(tst_label_nbrs)
print("Done in ", time.time() - t1)
t1 = time.time()
if(len(partition_indices) == 1):
prediction_shortlist = prediction_shortlists[0]
else:
prediction_shortlist = np.hstack(prediction_shortlists)
print(prediction_shortlist.shape)
del(prediction_shortlists)
val_dataset = DatasetGraphPrediction(_start, _end, prediction_shortlist)
hcp = GraphCollator(head_net, params["num_labels"], None, train=0)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=512,
num_workers=10,
collate_fn=hcp,
shuffle=False,
pin_memory=True)
val_data = dict(val_labels=tst_X_Y_val[:params["num_tst"], :],
val_loader=val_loader)
val_predicted_labels = lil_matrix(val_data["val_labels"].shape)
with torch.set_grad_enabled(False):
for batch_idx, batch_data in enumerate(val_data["val_loader"]):
val_preds, val_short = predict(head_net, batch_data)
partition_length = val_short.shape[1] // len(partition_indices)
for i in range(1, len(partition_indices)):
val_short[:, i *
partition_length: (i +
1) *
partition_length] += partition_indices[i][0]
update_predicted_shortlist((batch_data["inputs"]) - _start, val_preds,
val_predicted_labels, val_short, None, topK)
acc = run_validation(val_predicted_labels.tocsr(
), val_data["val_labels"], tst_exact_remove, tst_X_Y_trn, params["inv_prop"],dir)
print("acc = {}".format(acc))
``` |
{
"source": "josh146/cibuildwheel",
"score": 3
} |
#### File: cibuildwheel/bin/run_test.py
```python
from __future__ import print_function
import os, sys, subprocess, shutil, json
from glob import glob
def single_run(test_project):
# load project settings into environment
env_file = os.path.join(test_project, 'environment.json')
project_env = {}
if os.path.exists(env_file):
with open(env_file) as f:
project_env = json.load(f)
# run the build
env = os.environ.copy()
project_env = {str(k): str(v) for k, v in project_env.items()} # unicode not allowed in env
env.update(project_env)
print('Building %s with environment %s' % (test_project, project_env))
subprocess.check_call([sys.executable, '-m', 'cibuildwheel', test_project], env=env)
wheels = glob('wheelhouse/*.whl')
print('%s built successfully. %i wheels built.' % (test_project, len(wheels)))
# check some wheels were actually built
assert len(wheels) >= 3
# clean up
shutil.rmtree('wheelhouse')
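# A test project's environment.json typically holds the CIBW_* options to inject,
# e.g. (hypothetical contents):
#   {"CIBW_BUILD": "cp36-*", "CIBW_TEST_REQUIRES": "pytest"}
# These are exported as environment variables before invoking cibuildwheel.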
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("test_project_dir")
args = parser.parse_args()
project_path = os.path.abspath(args.test_project_dir)
if not os.path.exists(project_path):
print('Test project not found.', file=sys.stderr)
exit(2)
single_run(project_path)
print('Project built successfully.')
```
#### File: cibuildwheel/cibuildwheel/windows.py
```python
from __future__ import print_function
import os, tempfile, subprocess, sys, shutil
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from collections import namedtuple
from glob import glob
from .util import prepare_command, get_build_verbosity_extra_flags
def build(project_dir, output_dir, test_command, test_requires, before_build, build_verbosity, build_selector, environment):
# run_with_env is a cmd file that sets the right environment variables to
run_with_env = os.path.join(tempfile.gettempdir(), 'appveyor_run_with_env.cmd')
if not os.path.exists(run_with_env):
with open(run_with_env, 'wb') as f:
request = urlopen('https://github.com/ogrisel/python-appveyor-demo/raw/09a1c8672e5015a74d8f69d07add6ee803c176ec/appveyor/run_with_env.cmd')
f.write(request.read())
def shell(args, env=None, cwd=None):
# print the command executing for the logs
print('+ ' + ' '.join(args))
args = ['cmd', '/E:ON', '/V:ON', '/C', run_with_env] + args
return subprocess.check_call(' '.join(args), env=env, cwd=cwd)
PythonConfiguration = namedtuple('PythonConfiguration', ['version', 'arch', 'identifier', 'path'])
python_configurations = [
PythonConfiguration(version='2.7.x', arch="32", identifier='cp27-win32', path='C:\Python27'),
PythonConfiguration(version='2.7.x', arch="64", identifier='cp27-win_amd64', path='C:\Python27-x64'),
PythonConfiguration(version='3.4.x', arch="32", identifier='cp34-win32', path='C:\Python34'),
PythonConfiguration(version='3.4.x', arch="64", identifier='cp34-win_amd64', path='C:\Python34-x64'),
PythonConfiguration(version='3.5.x', arch="32", identifier='cp35-win32', path='C:\Python35'),
PythonConfiguration(version='3.5.x', arch="64", identifier='cp35-win_amd64', path='C:\Python35-x64'),
PythonConfiguration(version='3.6.x', arch="32", identifier='cp36-win32', path='C:\Python36'),
PythonConfiguration(version='3.6.x', arch="64", identifier='cp36-win_amd64', path='C:\Python36-x64'),
PythonConfiguration(version='3.7.x', arch="32", identifier='cp37-win32', path='C:\Python37'),
PythonConfiguration(version='3.7.x', arch="64", identifier='cp37-win_amd64', path='C:\Python37-x64'),
]
abs_project_dir = os.path.abspath(project_dir)
temp_dir = tempfile.mkdtemp(prefix='cibuildwheel')
built_wheel_dir = os.path.join(temp_dir, 'built_wheel')
for config in python_configurations:
if not build_selector(config.identifier):
print('cibuildwheel: Skipping build %s' % config.identifier, file=sys.stderr)
continue
# check python & pip exist for this configuration
assert os.path.exists(os.path.join(config.path, 'python.exe'))
assert os.path.exists(os.path.join(config.path, 'Scripts', 'pip.exe'))
# setup dirs
if os.path.exists(built_wheel_dir):
shutil.rmtree(built_wheel_dir)
os.makedirs(built_wheel_dir)
env = os.environ.copy()
# set up environment variables for run_with_env
env['PYTHON_VERSION'] = config.version
env['PYTHON_ARCH'] = config.arch
env['PATH'] = os.pathsep.join([
config.path,
os.path.join(config.path, 'Scripts'),
env['PATH']
])
env = environment.as_dictionary(prev_environment=env)
# for the logs - check we're running the right version of python
shell(['python', '--version'], env=env)
shell(['python', '-c', '"import struct; print(struct.calcsize(\'P\') * 8)\"'], env=env)
# prepare the Python environment
shell(['python', '-m', 'pip', 'install', '--upgrade', 'pip'],
env=env)
shell(['pip', 'install', '--upgrade', 'setuptools'], env=env)
shell(['pip', 'install', 'wheel'], env=env)
# run the before_build command
if before_build:
before_build_prepared = prepare_command(before_build, project=abs_project_dir)
shell([before_build_prepared], env=env)
# build the wheel
shell(['pip', 'wheel', abs_project_dir, '-w', built_wheel_dir, '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)
built_wheel = glob(built_wheel_dir+'/*.whl')[0]
# install the wheel
shell(['pip', 'install', built_wheel], env=env)
# test the wheel
if test_requires:
shell(['pip', 'install'] + test_requires, env=env)
if test_command:
# run the tests from c:\, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
# and not the repo code)
test_command_prepared = prepare_command(test_command, project=abs_project_dir)
shell([test_command_prepared], cwd='c:\\', env=env)
# we're all done here; move it to output (remove if already exists)
dst = os.path.join(output_dir, os.path.basename(built_wheel))
if os.path.isfile(dst):
os.remove(dst)
shutil.move(built_wheel, dst)
``` |
{
"source": "josh146/OpenFermion",
"score": 2
} |
#### File: openfermion/tests/_lih_integration_test.py
```python
from __future__ import absolute_import
import os
import numpy
import scipy.sparse
import unittest
from openfermion.config import *
from openfermion.hamiltonians import *
from openfermion.ops import *
from openfermion.transforms import *
from openfermion.utils import *
class LiHIntegrationTest(unittest.TestCase):
def setUp(self):
# Set up molecule.
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]
basis = 'sto-3g'
multiplicity = 1
filename = os.path.join(THIS_DIRECTORY, 'data',
'H1-Li1_sto-3g_singlet_1.45')
self.molecule = MolecularData(
geometry, basis, multiplicity, filename=filename)
self.molecule.load()
# Get molecular Hamiltonian.
self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
self.molecular_hamiltonian_no_core = (
self.molecule.
get_molecular_hamiltonian(occupied_indices=[0],
active_indices=range(1,
self.molecule.
n_orbitals)))
# Get FCI RDM.
self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get fermion Hamiltonian.
self.fermion_hamiltonian = normal_ordered(get_fermion_operator(
self.molecular_hamiltonian))
# Get qubit Hamiltonian.
self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get matrix form.
self.hamiltonian_matrix = get_sparse_operator(
self.molecular_hamiltonian)
self.hamiltonian_matrix_no_core = get_sparse_operator(
self.molecular_hamiltonian_no_core)
def test_all(self):
# Test reverse Jordan-Wigner.
fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test mapping to interaction operator.
fermion_hamiltonian = get_fermion_operator(self.molecular_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test RDM energy.
fci_rdm_energy = self.nuclear_repulsion
fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor *
self.one_body)
fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor *
self.two_body)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Confirm expectation on qubit Hamiltonian using reverse JW matches.
qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
qubit_energy = 0.0
for term, coefficient in qubit_rdm.terms.items():
qubit_energy += coefficient * self.qubit_hamiltonian.terms[term]
self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)
# Confirm fermionic RDMs can be built from measured qubit RDMs.
new_fermi_rdm = get_interaction_rdm(qubit_rdm)
fermi_rdm_energy = new_fermi_rdm.expectation(
self.molecular_hamiltonian)
self.assertAlmostEqual(fermi_rdm_energy, self.molecule.fci_energy)
# Test sparse matrices.
energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
self.assertAlmostEqual(energy, self.molecule.fci_energy)
expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
self.assertAlmostEqual(expected_energy, energy)
# Make sure you can reproduce Hartree-Fock energy.
hf_state = jw_hartree_fock_state(
self.molecule.n_electrons, count_qubits(self.qubit_hamiltonian))
hf_density = get_density_matrix([hf_state], [1.])
expected_hf_density_energy = expectation(self.hamiltonian_matrix,
hf_density)
expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
self.assertAlmostEqual(expected_hf_density_energy,
self.molecule.hf_energy)
# Check that frozen core result matches frozen core FCI from psi4.
# Record frozen core result from external calculation.
self.frozen_core_fci_energy = -7.8807607374168
no_core_fci_energy = scipy.linalg.eigh(
self.hamiltonian_matrix_no_core.todense())[0][0]
self.assertAlmostEqual(no_core_fci_energy,
self.frozen_core_fci_energy)
# Check that the freeze_orbitals function has the same effect as the
# as the occupied_indices option of get_molecular_hamiltonian.
frozen_hamiltonian = freeze_orbitals(
get_fermion_operator(self.molecular_hamiltonian), [0, 1])
self.assertTrue(frozen_hamiltonian ==
get_fermion_operator(self.molecular_hamiltonian_no_core))
```
#### File: openfermion/transforms/_weyl_ordering.py
```python
from scipy.special import binom
from openfermion.ops import (BosonOperator, QuadOperator)
def mccoy(mode, op_a, op_b, m, n):
""" Implement the McCoy formula on two operators of the
form op_a^m op_b^n.
Args:
mode (int): the mode number the two operators act on.
op_a: the label of operator a. This can be any hashable type.
op_b: the label of operator b. This can be any hashable type.
m (int): the power of operator a.
n (int): the power of operator b.
"""
new_op = dict()
for r in range(0, n+1):
coeff = binom(n, r)/(2**n)
new_term = tuple([(mode, op_b)]*r + [(mode, op_a)]*m
+ [(mode, op_b)]*(n-r))
if new_term not in new_op:
new_op[tuple(new_term)] = coeff
else:
new_op[tuple(new_term)] += coeff
return new_op
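# Example: mccoy(0, 'q', 'p', 1, 1) returns
#   {((0, 'q'), (0, 'p')): 0.5, ((0, 'p'), (0, 'q')): 0.5}
# i.e. the Weyl-symmetric combination (q p + p q) / 2 on mode 0.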
def weyl_polynomial_quantization(polynomial):
r""" Apply the Weyl quantization to a phase space polynomial.
The Weyl quantization is performed by applying McCoy's formula
directly to a polynomial term of the form q^m p^n:
q^m p^n ->
(1/ 2^n) sum_{r=0}^{n} Binomial(n, r) \hat{q}^r \hat{p}^m q^{n-r}
where q and p are phase space variables, and \hat{q} and \hat{p}
are quadrature operators.
The input is provided in the form of a string, for example
.. code-block:: python
weyl_polynomial_quantization('q0^2 p0^3 q1^3')
where 'q' or 'p' is the phase space quadrature variable, the integer
directly following is the mode it is with respect to, and '^2' is the
polynomial power.
Args:
polynomial (str): polynomial function of q and p of the form
'qi^m pj^n ...' where i,j are the modes, and m, n the powers.
Returns:
QuadOperator: the Weyl quantization of the phase space function.
Warning:
The runtime of this method is exponential in the maximum locality
of the original operator.
"""
# construct the equivalent QuadOperator
poly = dict()
if polynomial:
for term in polynomial.split():
if '^' in term:
op, pwr = term.split('^')
pwr = int(pwr)
else:
op = term
pwr = 1
mode = int(op[1:])
if mode not in poly:
poly[mode] = [0, 0]
if op[0] == 'q':
poly[mode][0] += pwr
elif op[0] == 'p':
poly[mode][1] += pwr
# replace {q_i^m p_i^n} -> S({q_i^m p_i^n})
operator = QuadOperator('')
for mode, (m, n) in poly.items():
qtmp = QuadOperator()
qtmp.terms = mccoy(mode, 'q', 'p', m, n)
operator *= qtmp
else:
operator = QuadOperator.zero()
return operator
def symmetric_ordering(operator, ignore_coeff=True, ignore_identity=True):
""" Apply the symmetric ordering to a BosonOperator or QuadOperator.
The symmetric ordering is performed by applying McCoy's formula
directly to polynomial terms of quadrature operators:
q^m p^n -> (1/ 2^n) sum_{r=0}^{n} Binomial(n, r) q^r p^m q^{n-r}
Note: in general, symmetric ordering is performed on a single term
containing the tensor product of various operators. However, this
function can also be applied to a sum of these terms, and the symmetric
product is distributed over the summed terms.
In this case, Hermiticity cannot be guaranteed - as such, by default
term coefficients and identity operators are ignored. However, this
behavior can be modified via keyword arguments described below if necessary.
Args:
operator: either a BosonOperator or QuadOperator.
ignore_coeff (bool): By default, the coefficients for
each term are ignored; S(a q^m p^n) = S(q^m p^n), and
the returned operator is always Hermitian.
If set to False, then instead the coefficients are taken into
account; S(q^m p^n) = a S(q^m p^n). In this case, if
a is a complex coefficient, it is not guaranteed that the
returned operator will be Hermitian.
ignore_identity (bool): By default, identity terms are ignored;
S(I) = 0. If set to False, then instead S(I) = I.
Returns:
transformed_operator: an operator of the same class as in the input.
Warning:
The runtime of this method is exponential in the maximum locality
of the original operator.
"""
if isinstance(operator, BosonOperator):
transformed_operator = BosonOperator()
for term in operator.terms:
if ignore_coeff:
coeff = 1
else:
coeff = operator.terms[term]
# Initialize identity matrix.
transformed_term = BosonOperator('', coeff)
if term:
# convert term into the form \prod_i {bd_i^m b_i^n}
modes = dict()
for op in term:
if op[0] not in modes:
modes[op[0]] = [0, 0]
modes[op[0]][1-op[1]] += 1
# Replace {bd_i^m b_i^n} -> S({bd_i^m b_i^n})
for mode, (m, n) in modes.items():
qtmp = BosonOperator()
qtmp.terms = mccoy(mode, 1, 0, m, n)
transformed_term *= qtmp
if term or (not ignore_identity):
transformed_operator += transformed_term
elif isinstance(operator, QuadOperator):
transformed_operator = QuadOperator()
for term in operator.terms:
if ignore_coeff:
coeff = 1
else:
coeff = operator.terms[term]
# Initialize identity matrix.
transformed_term = QuadOperator('', coeff)
if term:
# convert term into the form \prod_i {q_i^m p_i^n}
modes = dict()
for op in term:
if op[0] not in modes:
modes[op[0]] = [0, 0]
if op[1] == 'q':
modes[op[0]][0] += 1
elif op[1] == 'p':
modes[op[0]][1] += 1
# replace {q_i^m p_i^n} -> S({q_i^m p_i^n})
for mode, (m, n) in modes.items():
qtmp = QuadOperator()
qtmp.terms = mccoy(mode, 'q', 'p', m, n)
transformed_term *= qtmp
if term or (not ignore_identity):
transformed_operator += transformed_term
else:
raise TypeError("operator must be a BosonOperator or "
"QuadOperator.")
return transformed_operator
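# Example: symmetric_ordering(QuadOperator('q0 p0')) returns the Hermitian
# operator 0.5 q0 p0 + 0.5 p0 q0, matching McCoy's formula for m = n = 1.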
```
#### File: openfermion/utils/_diagonal_coulomb_trotter_error.py
```python
import numpy
from future.utils import iteritems
from openfermion import (count_qubits,
FermionOperator,
get_fermion_operator,
normal_ordered)
from openfermion.utils._low_depth_trotter_error import (
simulation_ordered_grouped_low_depth_terms_with_info)
from openfermion.utils._commutator_diagonal_coulomb_operator import (
commutator_ordered_diagonal_coulomb_with_two_body_operator)
def diagonal_coulomb_potential_and_kinetic_terms_as_arrays(hamiltonian):
"""Give the potential and kinetic terms of a diagonal Coulomb Hamiltonian
as arrays.
Args:
hamiltonian (FermionOperator): The diagonal Coulomb Hamiltonian to
separate the potential and kinetic terms
for. Identity is arbitrarily chosen
to be part of the potential.
Returns:
Tuple of (potential_terms, kinetic_terms). Both elements of the tuple
are numpy arrays of FermionOperators.
"""
if not isinstance(hamiltonian, FermionOperator):
try:
hamiltonian = normal_ordered(get_fermion_operator(hamiltonian))
except TypeError:
raise TypeError('hamiltonian must be either a FermionOperator '
'or DiagonalCoulombHamiltonian.')
potential = FermionOperator.zero()
kinetic = FermionOperator.zero()
for term, coeff in iteritems(hamiltonian.terms):
acted = set(term[i][0] for i in range(len(term)))
if len(acted) == len(term) / 2:
potential += FermionOperator(term, coeff)
else:
kinetic += FermionOperator(term, coeff)
potential_terms = numpy.array(
[FermionOperator(term, coeff)
for term, coeff in iteritems(potential.terms)])
kinetic_terms = numpy.array(
[FermionOperator(term, coeff)
for term, coeff in iteritems(kinetic.terms)])
return (potential_terms, kinetic_terms)
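# Example: for a Hamiltonian containing 1.0 [0^ 0 1^ 1] (a density-density term)
# and 0.5 [0^ 2] + 0.5 [2^ 0] (hopping), the first term is placed in
# potential_terms and the two hopping terms in kinetic_terms.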
def bit_mask_of_modes_acted_on_by_fermionic_terms(
fermion_term_list, n_qubits=None):
"""Create a mask of which modes of the system are acted on by which terms.
Args:
fermion_term_list (list of FermionOperators): A list of fermionic terms
to calculate the bitmask for.
n_qubits (int): The number of qubits (modes) in the system. If not
specified, defaults to the maximum of any term in
fermion_term_list.
Returns:
An n_qubits x len(fermion_term_list) boolean numpy array of whether
each term acts on the given mode index.
Raises:
ValueError: if n_qubits is too small for the given terms.
"""
if n_qubits is None:
n_qubits = 0
for term in fermion_term_list:
n_qubits = max(n_qubits, count_qubits(term))
mask = numpy.zeros((n_qubits, len(fermion_term_list)), dtype=bool)
for term_number, term in enumerate(fermion_term_list):
actions = term.terms
for action in actions:
for single_operator in action:
mode = single_operator[0]
try:
mask[mode][term_number] = True
except IndexError:
raise ValueError('Bad n_qubits: must be greater than '
'highest mode in any FermionOperator.')
return mask
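# Example: for fermion_term_list = [FermionOperator('0^ 1'), FermionOperator('2^ 2')]
# and n_qubits=3, the mask is
#   [[ True, False],
#    [ True, False],
#    [False,  True]]
# (rows index modes, columns index terms).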
def split_operator_trotter_error_operator_diagonal_two_body(hamiltonian,
order):
"""Compute the split-operator Trotter error of a diagonal two-body
Hamiltonian.
Args:
hamiltonian (FermionOperator): The diagonal Coulomb Hamiltonian to
compute the Trotter error for.
order (str): Whether to simulate the split-operator Trotter step
with the kinetic energy T first (order='T+V') or with
the potential energy V first (order='V+T').
Returns:
error_operator: The second-order Trotter error operator.
Notes:
The second-order split-operator Trotter error is calculated from the
double commutator [T, [V, T]] + [V, [V, T]] / 2 when T is simulated
before V (i.e. exp(-iTt/2) exp(-iVt) exp(-iTt/2)), and from the
double commutator [V, [T, V]] + [T, [T, V]] / 2 when V is simulated
before T, following Equation 9 of "The Trotter Step Size Required for
Accurate Quantum Simulation of Quantum Chemistry" by Poulin et al.
The Trotter error operator is then obtained by dividing by 12.
"""
n_qubits = count_qubits(hamiltonian)
potential_terms, kinetic_terms = (
diagonal_coulomb_potential_and_kinetic_terms_as_arrays(hamiltonian))
# Cache halved potential and kinetic terms for the second commutator.
halved_potential_terms = potential_terms / 2.0
halved_kinetic_terms = kinetic_terms / 2.0
# Assign the outer term of the second commutator based on the ordering.
outer_potential_terms = (halved_potential_terms if order == 'T+V' else
potential_terms)
outer_kinetic_terms = (halved_kinetic_terms if order == 'V+T' else
kinetic_terms)
potential_mask = bit_mask_of_modes_acted_on_by_fermionic_terms(
potential_terms, n_qubits)
kinetic_mask = bit_mask_of_modes_acted_on_by_fermionic_terms(
kinetic_terms, n_qubits)
error_operator = FermionOperator.zero()
for potential_term in potential_terms:
modes_acted_on_by_potential_term = set()
for potential_term_action in potential_term.terms:
modes_acted_on_by_potential_term.update(
set(operator[0] for operator in potential_term_action))
if not modes_acted_on_by_potential_term:
continue
potential_term_mode_mask = numpy.logical_or.reduce(
[kinetic_mask[mode] for mode in modes_acted_on_by_potential_term])
for kinetic_term in kinetic_terms[potential_term_mode_mask]:
inner_commutator_term = (
commutator_ordered_diagonal_coulomb_with_two_body_operator(
potential_term, kinetic_term))
modes_acted_on_by_inner_commutator = set()
for inner_commutator_action in inner_commutator_term.terms:
modes_acted_on_by_inner_commutator.update(
set(operator[0] for operator in inner_commutator_action))
if not modes_acted_on_by_inner_commutator:
continue
inner_commutator_mode_mask = numpy.logical_or.reduce(
[potential_mask[mode]
for mode in modes_acted_on_by_inner_commutator])
# halved_potential_terms for T+V order, potential_terms for V+T
for outer_potential_term in outer_potential_terms[
inner_commutator_mode_mask]:
commutator_ordered_diagonal_coulomb_with_two_body_operator(
outer_potential_term, inner_commutator_term,
prior_terms=error_operator)
inner_commutator_mode_mask = numpy.logical_or.reduce(
[kinetic_mask[qubit]
for qubit in modes_acted_on_by_inner_commutator])
# kinetic_terms for T+V order, halved_kinetic_terms for V+T
for outer_kinetic_term in outer_kinetic_terms[
inner_commutator_mode_mask]:
commutator_ordered_diagonal_coulomb_with_two_body_operator(
outer_kinetic_term, inner_commutator_term,
prior_terms=error_operator)
# Divide by 12 to match the error operator definition.
# If order='V+T', also flip the sign to account for inner_commutator_term
# not flipping between the different orderings.
if order == 'T+V':
error_operator /= 12.0
else:
error_operator /= -12.0
return error_operator
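# Hedged usage sketch (not part of the original file; fermi_hubbard and
# normal_ordered are OpenFermion helpers and the model parameters are arbitrary):
#   hubbard = normal_ordered(fermi_hubbard(3, 3, 1.0, 4.0))
#   error_op = split_operator_trotter_error_operator_diagonal_two_body(
#       hubbard, order='T+V')
# error_op then approximates ([T, [V, T]] + [V, [V, T]] / 2) / 12 for the
# T-first ordering described in the docstring above.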
def fermionic_swap_trotter_error_operator_diagonal_two_body(
hamiltonian, external_potential_at_end=False):
"""Compute the fermionic swap network Trotter error of a diagonal
two-body Hamiltonian.
    Args:
        hamiltonian (FermionOperator): The diagonal Coulomb Hamiltonian to
            compute the Trotter error for.
        external_potential_at_end (bool): Whether to apply the rotations from
            the external potential at the end of the Trotter step instead of
            interspersing them throughout it. Defaults to False.
Returns:
error_operator: The second-order Trotter error operator.
Notes:
Follows Eq 9 of Poulin et al., arXiv:1406.4920, applied to the
Trotter step detailed in Kivlichan et al., arxiv:1711.04789.
"""
single_terms = numpy.array(
simulation_ordered_grouped_low_depth_terms_with_info(
hamiltonian,
external_potential_at_end=external_potential_at_end)[0])
# Cache the halved terms for use in the second commutator.
halved_single_terms = single_terms / 2.0
term_mode_mask = bit_mask_of_modes_acted_on_by_fermionic_terms(
single_terms, count_qubits(hamiltonian))
error_operator = FermionOperator.zero()
for beta, term_beta in enumerate(single_terms):
modes_acted_on_by_term_beta = set()
for beta_action in term_beta.terms:
modes_acted_on_by_term_beta.update(
set(operator[0] for operator in beta_action))
beta_mode_mask = numpy.logical_or.reduce(
[term_mode_mask[mode] for mode in modes_acted_on_by_term_beta])
# alpha_prime indices that could have a nonzero commutator, i.e.
# there's overlap between the modes the corresponding terms act on.
valid_alpha_primes = numpy.where(beta_mode_mask)[0]
# Only alpha_prime < beta enters the error operator; filter for this.
valid_alpha_primes = valid_alpha_primes[valid_alpha_primes < beta]
for alpha_prime in valid_alpha_primes:
term_alpha_prime = single_terms[alpha_prime]
inner_commutator_term = (
commutator_ordered_diagonal_coulomb_with_two_body_operator(
term_beta, term_alpha_prime))
modes_acted_on_by_inner_commutator = set()
for inner_commutator_action in inner_commutator_term.terms:
modes_acted_on_by_inner_commutator.update(
set(operator[0] for operator in inner_commutator_action))
# If the inner commutator has no action, the commutator is zero.
if not modes_acted_on_by_inner_commutator:
continue
inner_commutator_mask = numpy.logical_or.reduce(
[term_mode_mask[mode]
for mode in modes_acted_on_by_inner_commutator])
# alpha indices that could have a nonzero commutator.
valid_alphas = numpy.where(inner_commutator_mask)[0]
# Filter so alpha <= beta in the double commutator.
valid_alphas = valid_alphas[valid_alphas <= beta]
for alpha in valid_alphas:
# If alpha = beta, only use half the term.
if alpha != beta:
outer_term_alpha = single_terms[alpha]
else:
outer_term_alpha = halved_single_terms[alpha]
# Add the partial double commutator to the error operator.
commutator_ordered_diagonal_coulomb_with_two_body_operator(
outer_term_alpha, inner_commutator_term,
prior_terms=error_operator)
# Divide by 12 to match the error operator definition.
error_operator /= 12.0
return error_operator
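# Hedged usage sketch (illustrative only): for the same kind of diagonal Coulomb
# Hamiltonian, the swap-network error operator is obtained with
#   error_op = fermionic_swap_trotter_error_operator_diagonal_two_body(hubbard)
# and, as in the split-operator variant above, the result is already divided by 12.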
``` |
{
"source": "Josh1560/Questionable",
"score": 3
} |
#### File: bot/extensions/search.py
```python
import discord
from discord.ext import commands
#Import required libraries
from json import load
from urllib.parse import quote
from aiohttp import ClientSession
from bs4 import BeautifulSoup
from datetime import datetime
#Import custom libraries
#Define variables
engineSettings = load(open("./bot/data/engineSettings.json"))
botSettings = load(open("./bot/data/botSettings.json"))
#Define functions
async def reply(ctx, string):
await ctx.send(f"{ctx.author.mention}, {string}")
class search:
def __init__(self, bot):
self.bot = bot
self.session = ClientSession(loop=bot.loop)
async def scrape(self, ctx, engine, query):
if query:
url = f"{engine['queryURL']}{quote(query)}"
links = list()
async with self.session.get(url) as response:
text = await response.text()
soup = BeautifulSoup(text, "html.parser")
selection = soup.select(engine["selection"])[:5]
prefixUrl = engine["prefixUrl"]
for i in selection:
links.append(f"[{i.text}]({prefixUrl}{i['href']})")
embed = discord.Embed(
timestamp = datetime.utcnow(),
colour = discord.Colour(botSettings["embedColour"])
)
embed.set_footer(
text = f"Requested by {ctx.author.name}#{ctx.author.discriminator}",
icon_url = ctx.author.avatar_url
)
embed.set_author(
name = f"Search ({engine['name']})",
icon_url = self.bot.user.avatar_url
)
embed.add_field(
name = "Search Query",
value = f"```{query}```",
inline = False
)
embed.add_field(
name = "Results",
value = "\n".join(links),
inline = False
)
await ctx.send(embed=embed)
else:
await reply(ctx, "Please input a search term! :warning:")
@commands.command()
async def search(self, ctx, *, args=None):
#Get server's default engine
await self.scrape(ctx, engineSettings["duckduckgo"], args)
@commands.command(aliases=["bing", "yahoo", "ask"])
async def duckduckgo(self, ctx, *, query=None):
await self.scrape(ctx, engineSettings[ctx.invoked_with.lower()], query)
@commands.command()
async def google(self, ctx):
await reply(ctx, "Google is not supported. :information_source:")
@commands.command()
async def youtube(self, ctx):
await reply(ctx, "YouTube is not supported, yet. :information_source:")
def setup(bot):
bot.add_cog(search(bot))
```
#### File: Questionable/bot/main.py
```python
import discord
from discord.ext import commands
#Import required libraries
from json import load
"""Hopefully remove one of the following:"""
from sys import exc_info
from traceback import print_exc
"""Yeah, I mean one of the above"""
from datetime import datetime
from os import environ
#Import custom libraries
#Define variables
botSettings = load(open("./bot/data/botSettings.json"))
myExtensions = [
"extensions.basic",
"extensions.developer",
"extensions.search"
]
#Define functions
async def reply(ctx, string):
await ctx.send(f"{ctx.author.mention}, {string}")
#Set up bot
bot = commands.AutoShardedBot(
#shard_count = 4,
command_prefix = botSettings["prefix"],
case_insensitive = True
)
bot.remove_command("help")
#Error handling
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.MissingPermissions):
await reply(ctx, "You must be a server admin to do that! :warning:")
elif isinstance(error, commands.CheckFailure):
pass
elif isinstance(error, commands.CommandNotFound):
pass
else:
print(error)
await reply(ctx, f"An error occured :warning:```\n{error}```")
@bot.event
async def on_error(event_method, *args):
if (isinstance(exc_info()[1], discord.Forbidden)):
print(f"Ignoring 403 exception in '{event_method}'.")
else:
print_exc()
#Internal events
@bot.event
async def on_ready():
bot.startTime = datetime.now().replace(microsecond=0)
print(f"Bot ready, logged in as '{bot.user.name}#{bot.user.discriminator}'. ({bot.user.id})")
await bot.change_presence(activity=discord.Game(f"{botSettings['prefix']}help"))
# TODO: Set up Postgres connection here
#Start up bot
if __name__ == "__main__":
for i in myExtensions:
bot.load_extension(i)
bot.run(environ["BOT_TOKEN"])
``` |
{
"source": "josh1924/CooperativeGames",
"score": 3
} |
#### File: cooperativegames/tests/test_measurestools.py
```python
import numpy as np
from cooperativegames.measures.cooperativegames_tools import all_subsets_it,\
winning_coalitions_it, get_critical_players, weight_coalition,\
all_subsets, _in_set
from cooperativegames.measures.tools import from_positions2weights
def test():
"""Main function for testing power indexs of the cooperative games package.
"""
## From positions to weighs
weights = from_positions2weights(np.random.random(10))
assert(weights.shape == (10, 10))
weights = from_positions2weights(np.random.random((10, 1)))
assert(weights.shape == (10, 10))
## Set management
set_a = [0, 1, 2]
set_b = set_a + [5, 6]
assert(_in_set(set_a, set_b))
setsubsets = all_subsets(set_a)
assert(len(setsubsets) == 2**len(set_a))
setsubsets = all_subsets(set_b)
assert(len(setsubsets) == 2**len(set_b))
for subset in all_subsets_it(set_b):
pass
## Combinations
# Weigh coalition
coalition = [0, 3, 4]
weight_coalition(coalition, weights)
# Get critical player
votes = np.random.randint(0, 20, 10)
win_v = np.around(votes.sum()*.3)
get_critical_players(coalition, votes, win_v)
for w_c in winning_coalitions_it(set_b, votes, win_v):
pass
``` |
{
"source": "josh2397/DLSArduino",
"score": 3
} |
#### File: DLSArduino/GUI/app.py
```python
from PyQt5.QtWidgets import (QApplication)
# from dialog import Ui_Dialog as Form
import serial.tools.list_ports as port_list
import sys
from view import View
from serialConnection import SerialConnection
from controller import Controller
from instructions import Instructions
class App (QApplication):
def __init__(self, sys_argv):
super(App, self).__init__(sys_argv)
# dialog = QDialog()
# dialog.ui = Form()
# dialog.ui.setupUi(dialog)
# dialog.exec_()
# dialog.show()
ports = list(port_list.comports())
for p in ports:
print (f'{p} is visible')
port = input("Enter the COM port you're connected to: ")
self.serialConnection = SerialConnection(port)
self.controller = Controller(self.serialConnection, Instructions)
self.view = View(self.serialConnection, self.controller)
self.view.show()
#self.serialConnection.connectionTest()
if __name__ == '__main__':
app = App(sys.argv)
sys.exit(app.exec_())
```
#### File: DLSArduino/GUI/serialConnection.py
```python
from instructions import Instructions
import serial
import serial.tools.list_ports as port_list
import time
import sys
class SerialConnection:
def __init__(self, port):
""" Sets up the serial connection with the specified configuration.
:param: port - the name of the port to be used within the serial configuration.
"""
self.ser = serial.Serial(port=port.upper(), baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_ODD, stopbits=serial.STOPBITS_TWO, xonxoff=False, timeout=200)
def connectionTest(self):
""" Sends two characters to the tiva and waits to receive them back to ensure that communication is ready to begin running the program.
"""
print("Turn on the Tiva")
g_byte = self.ser.read(1)
o_byte = self.ser.read(1)
        if (g_byte.decode("ascii") != "G") or (o_byte.decode("ascii") != "O"):
print("Unsuccessful Serial Connection to Tiva, Try Again")
else:
print("Successful Serial Connection to Tiva")
def sendInstruction(self, instruction):
# instruction = '!'
"""
Send a single ascii character to inform the tiva of what task is should execute.
:param: instruction - the instruction character to be sent to the tiva.
"""
print(f'Sending: {instruction}')
self.ser.write(instruction.encode("ascii"))
self.ser.write('\n'.encode("ascii"))
self.ser.reset_input_buffer()
ser_bytes = self.ser.read(1)
print(f'Receiving\nraw data: {ser_bytes}')
# decoded_bytes = (ser_bytes.decode("ascii"))
# print(f'Ascii Value: {decoded_bytes}', flush=True)
def sendValue(self, value):
""" Sends a value associated with an instruction.
:param: value - The raw value to be sent to the tiva.
"""
print(f'Sending: {value}\n')
self.ser.write(bytes([value]))
self.ser.write('\n'.encode("ascii"))
self.ser.reset_input_buffer()
ser_bytes = self.ser.read(1)
print(f'Receiving\nraw data: {ser_bytes}')
#decoded_bytes = (ser_bytes.decode("ascii"))
#print(f'Ascii Value: {decoded_bytes}', flush=True)
def readSample(self):
""" Waits to receive samples from the Tiva and combines bytes from separated writes
:return: combined bytes for whole sample value.
"""
ser_bytes_sample1 = self.ser.read(1)
print(f'byte 1: {ser_bytes_sample1}')
ser_bytes_sample2 = self.ser.read(1)
        print(f'byte 2: {ser_bytes_sample2}')
ser_bytes_total = int.from_bytes(ser_bytes_sample1, byteorder='little', signed=False) + (int.from_bytes(ser_bytes_sample2, byteorder='little', signed=False) << 8)
return ser_bytes_total
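    # Worked example for readSample above (illustrative bytes): if byte 1 is 0x34
    # and byte 2 is 0x12, the little-endian reassembly gives
    # 0x34 + (0x12 << 8) = 52 + 4608 = 4660, i.e. 0x1234.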
def readTime(self):
ser_bytes_lower = self.ser.read(1)
ser_bytes_mid = self.ser.read(1)
ser_bytes_upper = self.ser.read(1)
ser_bytes_total = int.from_bytes(ser_bytes_lower, byteorder='little', signed=False) + (int.from_bytes(ser_bytes_mid, byteorder='little', signed=False) << 8) + (int.from_bytes(ser_bytes_upper, byteorder='little', signed=False) << 16)
return ser_bytes_total/1000
def sendStopInstruction(self, instruction):
""" Sends an '!' character so the Tiva will interrupt and stop running any task that's currently executing.
:param: '!' instruction sent by the Stop Button handleStop function.
"""
print(f'Sending: {instruction}\n')
self.ser.write(instruction.encode("ascii"))
self.ser.write('\n'.encode("ascii"))
self.ser.reset_input_buffer()
ser_bytes = self.ser.read(1)
print(ser_bytes)
``` |
{
"source": "josh314/bach",
"score": 3
} |
#### File: josh314/bach/bach.py
```python
import signal
import asyncio
import logging
import aiohttp
class Client(object):
def __init__(self, loop, handler, max_connections=30):
self.loop = loop
self.handler = handler
self.sem = asyncio.Semaphore(max_connections)#For preventing accidental DOS
self.queue = asyncio.PriorityQueue()
self.processing = set()
self.done = set()
self.failed = set()
self.active = True
self.log = logging.getLogger(__name__)
self.log.addHandler(logging.NullHandler())
def enqueue(self, priority, url):
if self.active:
self.queue.put_nowait((priority,url))
@asyncio.coroutine
def get_html(self,url):
html = None
err = None
self.log.info("Requesting: " + url)
resp = yield from aiohttp.get(url)
if resp.status == 200:
html = yield from resp.read()
else:
if resp.status == 404:
err = aiohttp.web.HTTPNotFound
else:
err = aiohttp.HttpProcessingError(
code=resp.status, message=resp.reason,
headers=resp.headers)
resp.close()
if(err):
raise err
return html
@asyncio.coroutine
def process_page(self, url):
self.log.info("Processing: " + url)
self.processing.add(url)
try:
with (yield from self.sem):#Limits number of concurrent requests
html = yield from self.get_html(url)
except aiohttp.HttpProcessingError as e:
self.log.error('{}: {} {}'.format(url,e.code,e.message))
self.failed.add(url)
else:
success = self.handler.handle(url, html)
if success:
self.done.add(url)
else:
self.failed.add(url)
# finally:
# self.processing.remove(url)
@asyncio.coroutine
def batch_request(self):
while True:
try:
priority, url = yield from asyncio.wait_for(self.queue.get(),5)
self.loop.create_task(self.process_page(url))
except asyncio.TimeoutError:
self.log.info("No more requests.")
break
while self.processing:
self.log.debug("{} tasks still processing.".format(len(self.processing)))
yield from asyncio.sleep(5)
def launch(self, urls):
# queue up initial urls
for url in urls:
self.enqueue(*url)
task = self.loop.create_task(self.batch_request())
try:
self.loop.add_signal_handler(signal.SIGINT, self.shutdown)
except RuntimeError:
pass
try:
self.loop.run_until_complete(task)
except asyncio.CancelledError:
pass
def shutdown(self):
self.log.warning("Shutdown initiated.")
self.active = False
try:
while True:
self.queue.get_nowait()
except asyncio.QueueEmpty:
pass
for task in asyncio.Task.all_tasks():
task.cancel()
``` |
{
"source": "josh3255/EfficientNet",
"score": 3
} |
#### File: josh3255/EfficientNet/main.py
```python
import torch
from efficientnet_pytorch import EfficientNet
from models import CustomModel
from loss import *
from dataset import *
import torch.optim as optim
import torch.backends.cudnn as cudnn
if __name__ == '__main__':
model = CustomModel()
if torch.cuda.is_available():
model = model.cuda()
model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
else:
print('cuda is not available')
model.eval()
optimizer = optim.Adam(model.parameters(), lr=3.2768e-5, weight_decay=5e-4)
img_path = 'C:/Users/JOSH/Desktop/classification_train_data'
label_path = 'C:/Users/JOSH/Desktop/classification_train_data/gt.txt'
valid_img_path = 'C:/Users/JOSH/Desktop/classification_valid_data'
valid_label_path = 'C:/Users/JOSH/Desktop/classification_valid_data/gt.txt'
dataset = CustomDataset(img_path=img_path, label_path=label_path)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)
valid_dataset = CustomDataset(img_path=valid_img_path, label_path=valid_label_path)
valid_dataloader = DataLoader(valid_dataset, batch_size=1, shuffle=True, num_workers=2)
print('data length : {}'.format(len(dataloader.dataset)))
criterion = Loss()
for epoch in range(100):
total_loss = 0.0
valid_loss = 0.0
accuracy = 0
for step, data in enumerate(dataloader):
def train():
optimizer.zero_grad()
img, gt = data
gt = gt.cuda()
outputs = model(img)
loss = criterion(outputs, gt)
loss.backward()
return loss
step_loss = optimizer.step(train)
# print('step : {} || step_loss : {}'.format(step, step_loss / 16))
total_loss = total_loss + step_loss.item()
for data in valid_dataloader:
img, gt = data
gt = gt.cuda()
outputs = model(img)
loss = criterion(outputs, gt)
outputs = torch.argmax(outputs, dim=1)
if outputs.item() == gt.item():
accuracy = accuracy + 1
valid_loss = valid_loss + loss
print('epoch : {} || total_loss : {} || validation acc : {} validation_loss : {}\n'.format(epoch, total_loss, accuracy/len(valid_dataloader.dataset), valid_loss))
# if epoch % 10 == 0:
torch.save(model.module.state_dict(), 'D:/classifier_state_dict/effinet_' + repr(epoch) + '.pth')
``` |
{
"source": "josh3255/GAN",
"score": 3
} |
#### File: josh3255/GAN/models.py
```python
import cv2
import torch
import argparse
import numpy as np
import torch.nn as nn
# from config import *
class Discriminator(nn.Module):
def __init__(self, args):
super(Discriminator, self).__init__()
self.args = args
self.model = nn.Sequential(
nn.Linear(int(np.prod(self.args.train_img_size)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid(),
)
def forward(self, img):
img_flat = img.view(img.size(0), -1)
validity = self.model(img_flat)
return validity
class Generator(nn.Module):
def __init__(self, args):
super(Generator, self).__init__()
self.args = args
def block(in_feature, out_feature, normalize=True):
layers = [nn.Linear(in_feature, out_feature)]
if normalize:
layers.append(nn.BatchNorm1d(out_feature, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(args.latent_dimension, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, int(np.prod(self.args.train_img_size)))
)
def forward(self, z):
generated_image = self.model(z)
generated_image = generated_image.view(generated_image.size(0), *self.args.train_img_size)
return generated_image
def main(args):
g_model = Generator(args)
tmp = torch.randn((4, args.latent_dimension))
# tmp = torch.randn(args.train_img_size)
g_output = g_model(tmp)
# g_output = g_output.detach().numpy()
# save_g_output = g_output[0].transpose((1, 2, 0))
# print(save_g_output)
# cv2.imwrite('./test.jpg', save_g_output)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Parameters parser', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
``` |
{
"source": "josh3255/Vision-Transformer-Pytorch",
"score": 3
} |
#### File: josh3255/Vision-Transformer-Pytorch/main.py
```python
from dataset import *
from model import *
from loss import *
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
if __name__ == '__main__':
train_dataset = Cifar10Train('/home/josh/Data/cifar-10-python/cifar-10-batches-py/')
train_dataloader = DataLoader(train_dataset, batch_size=1024, shuffle=True, num_workers=1)
print('successfully loaded {} training images and labels'.format(len(train_dataloader.dataset)))
valid_dataset = Cifar10Valid('/home/josh/Data/cifar-10-python/cifar-10-testbatches-py')
valid_dataloader = DataLoader(valid_dataset, batch_size=512, shuffle=True, num_workers=1)
print('successfully loaded {} validation images and labels'.format(len(valid_dataloader.dataset)))
model = TransformerEncoder()
if torch.cuda.is_available():
model = model.cuda()
model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
else:
print('cuda is not available')
optimizer = optim.Adam(model.parameters(), lr=3.2768e-5, weight_decay=5e-4)
criterion = Loss()
for epoch in range(100):
train_loss = 0.0
valid_loss = 0.0
accuracy = 0
model.train()
for step, item in enumerate(train_dataloader):
def train():
optimizer.zero_grad()
train_data, train_label = item
train_data = train_data.float().cuda()
train_label = train_label.cuda()
outputs = model(train_data)
loss = criterion(outputs, train_label)
loss.backward()
return loss
step_loss = optimizer.step(train)
train_loss = train_loss + step_loss.item()
            # print('step : {} || step_loss : {}'.format(step, step_loss))
model.eval()
for step, item in enumerate(valid_dataloader):
def valid():
valid_accuracy = 0
valid_data, valid_label = item
valid_data = valid_data.float().cuda()
valid_label = valid_label.cuda()
outputs = model(valid_data)
loss = criterion(outputs, valid_label)
outputs = torch.argmax(outputs, dim=1)
for i in range(len(valid_label)):
if outputs[i].item() == valid_label[i].item():
valid_accuracy = valid_accuracy + 1
return valid_accuracy, loss.item()
_a, _l = valid()
accuracy = accuracy + _a
valid_loss = valid_loss + _l
print('epoch : {} || train_loss : {} || validation acc : {} validation_loss : {}'.format(epoch, train_loss / len(train_dataloader.dataset),
accuracy / len(valid_dataloader.dataset),valid_loss / len(valid_dataloader.dataset)))
if epoch % 10 == 0:
torch.save(model.module.state_dict(), '/home/josh/Weights/state_dict/ViT_' + repr(epoch) + '.pth')
``` |
{
"source": "josh95117/freetype-py",
"score": 2
} |
#### File: freetype-py/examples/glyph-metrics.py
```python
from freetype import *
def arrow( x,y, dx, dy, **kwargs):
kwargs['shape'] = 'full'
kwargs['head_width'] = 30
kwargs['head_length'] = 40
kwargs['length_includes_head'] =True
kwargs['facecolor'] = 'k'
kwargs['edgecolor'] ='k'
kwargs['linewidth'] =.5
plt.arrow(x,y,dx,dy,**kwargs)
def double_arrow(x, y, dx, dy, **kwargs):
cx,cy = x+dx/2., y+dy/2.
dx /= 2.0
dy /= 2.0
arrow(cx,cy,+dx,+dy,**kwargs)
arrow(cx,cy,-dx,-dy,**kwargs)
def line(x, y, dx, dy, **kwargs):
kwargs['color'] = 'k'
kwargs['linewidth'] =.5
plt.plot([x,x+dx],[y,y+dy],**kwargs)
def point(x, y, r, **kwargs):
kwargs['color'] = 'k'
plt.scatter([x],[y],r,**kwargs)
def text( x,y,text, **kwargs):
kwargs['fontsize'] = 18
plt.text(x, y, text, **kwargs)
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
face = Face('./Vera.ttf')
face.set_char_size( 32*64 )
face.load_char('g')
slot = face.glyph
bitmap = slot.bitmap
width = slot.bitmap.width
rows = slot.bitmap.rows
pitch = slot.bitmap.pitch
outline= slot.outline
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if tags[j] & (1 << 0) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [Path.MOVETO,]
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([Path.LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([Path.CURVE3, Path.CURVE3])
else:
verts.append(segment[1])
codes.append(Path.CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ Path.CURVE3, Path.CURVE3])
verts.append(segment[-1])
codes.append(Path.CURVE3)
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
VERTS = np.array(VERTS)
x,y = VERTS[:,0], VERTS[:,1]
VERTS[:,0], VERTS[:,1] = x, y
path = Path(VERTS, CODES)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
width,height = xmax-xmin, ymax-ymin
dw, dh = 0.2*width, 0.1*height
bearing = xmin - slot.metrics.horiBearingX, ymin - slot.metrics.horiBearingY
advance = slot.advance
origin = bearing
figure = plt.figure(figsize=(16,10), frameon=False, facecolor="white")
axes = plt.subplot(121, frameon=False, aspect=1)
glyph = patches.PathPatch(path, fill = True, facecolor='k', lw=0)
plt.xlim(xmin - .25*width, xmax + .75*width)
plt.ylim(ymin - .5*height, xmax + .75*height)
plt.xticks([]), plt.yticks([])
axes.add_patch(glyph)
# Y axis
arrow(origin[0], ymin-dh, 0, height+3*dh)
# X axis
arrow(origin[0]-dw, 0, width+3*dw, 0)
# origin
point(0,0,50)
text( -20, -20, "$origin$", va='top', ha='right')
# Bounding box
bbox = patches.Rectangle( (xmin,ymin), width, height, fill = False, lw=.5)
axes.add_patch(bbox)
# Width
line(xmin, ymax, 0, 3*dh, linestyle="dotted")
text( xmin, ymax+3.25*dh, "$x_{min}$", va='bottom', ha='center')
line(xmax, ymax, 0, 3*dh, linestyle="dotted")
text( xmax, ymax+3.25*dh, "$x_{max}$", va='bottom', ha='center')
double_arrow(xmin, ymax+2.5*dh, width, 0)
text(xmin+width/2., ymax+1.75*dh, "$width$", va='bottom', ha='center')
# Height
line(xmax, ymin, 3*dw, 0, linestyle="dotted")
text(xmax+3.25*dw, ymin, "$y_{min}$", va='baseline', ha='left')
line(xmax, ymax, 3*dw, 0, linestyle="dotted")
text(xmax+3.25*dw, ymax, "$y_{max}$", va='baseline', ha='left')
double_arrow(xmax+2.5*dw, ymin, 0, height)
text(xmax+2.75*dw, ymin+height/2., "$height$", va='center', ha='left')
# Advance
point(advance.x,0,50)
line(advance.x, 0, 0, ymin-dh, linestyle="dotted")
arrow(0, ymin-.5*dh, advance.x, 0)
text(advance.x/2., ymin-1.25*dh, "$advance$", va='bottom', ha='center')
# Bearing Y
arrow(xmax+.25*dw, 0, 0, ymax)
text(xmax+.5*dw, ymax/2, "$Y_{bearing}$", va='center', ha='left')
# Bearing X
arrow(0, ymax/2., xmin, 0)
text(-10, ymax/2, "$X_{bearing}$", va='baseline', ha='right')
# -------------------------------------------------------------------------
axes = plt.subplot(122, frameon=False, aspect=1)
glyph = patches.PathPatch(path, fill = True, facecolor='k', lw=0)
axes.add_patch(glyph)
plt.xlim(xmin - .25*width, xmax + .75*width)
plt.ylim(ymin - .5*height, xmax + .75*height)
plt.xticks([]), plt.yticks([])
advance = slot.metrics.vertAdvance
x_bearing = slot.metrics.vertBearingX
y_bearing = slot.metrics.vertBearingY
# Y axis
arrow(xmin-x_bearing, ymax+y_bearing+2*dh, 0, -advance-3*dh)
# X axis
arrow(xmin-2*dw, ymax+y_bearing, width+4*dw, 0)
# origin
point( xmin-x_bearing, ymax+y_bearing, 50)
text( xmin-x_bearing-30, ymax+y_bearing+10, "$origin$", va='bottom', ha='right')
# Bounding box
bbox = patches.Rectangle( (xmin,ymin), width, height, fill = False, lw=.5)
axes.add_patch(bbox)
# # Advance
point(xmin-x_bearing, ymax+y_bearing-advance, 50)
line(xmin-x_bearing, ymax+y_bearing-advance, xmax-dw, 0, linestyle="dotted")
arrow(xmax+dw, ymax+y_bearing, 0, -advance)
text(xmax+1.25*dw, ymax+y_bearing-advance/2., "$advance$", va='baseline', ha='left')
# Width
line(xmin, ymin, 0, -4*dh, linestyle="dotted")
text( xmin, ymin-4.25*dh, "$x_{min}$", va='top', ha='center')
line(xmax, ymin, 0, -4*dh, linestyle="dotted")
text( xmax, ymin-4.25*dh, "$x_{max}$", va='top', ha='center')
double_arrow(xmin, ymin-3.5*dh, width, 0)
text(xmin+width/2., ymin-3.75*dh, "$width$", va='top', ha='center')
# Height
line(xmin, ymin, -3*dw, 0, linestyle="dotted")
text(xmin-1.5*dw, ymin, "$y_{min}$", va='baseline', ha='right')
line(xmin, ymax, -3*dw, 0, linestyle="dotted")
text(xmin-1.5*dw, ymax, "$y_{max}$", va='baseline', ha='right')
double_arrow(xmin-.5*dw, ymin, 0, height)
text(xmin-.75*dw, ymin+height/2., "$height$", va='center', ha='right')
#point(xmin-x_bearing, ymax+y_bearing, 50)
# Bearing Y
arrow(xmax-.5*dw, ymax+y_bearing, 0, -y_bearing)
text(xmax-.5*dw, ymax+y_bearing+.25*dh, "$Y_{bearing}$", va='bottom', ha='center')
# # Bearing X
line(xmin, ymax, 0, 3*dh, linestyle="dotted")
arrow(xmin-x_bearing, ymax+y_bearing+dh, x_bearing, 0)
text(xmin-.25*dw, ymax+y_bearing+dh, "$X_{bearing}$", va='baseline', ha='right')
plt.savefig('glyph-metrics.pdf')
plt.show()
```
#### File: freetype-py/examples/glyph-vector-2-cairo.py
```python
from freetype import *
# using Matrix class from Cairo, instead of FreeType's
from cairo import Context, SVGSurface, Matrix, SurfacePattern, FILTER_BEST
from bitmap_to_surface import make_image_surface
if __name__ == '__main__':
import numpy
from PIL import Image
# Replacement for Path enums:
STOP, MOVETO, LINETO, CURVE3, CURVE4 = 0, 1, 2, 3, 4
face = Face('./Vera.ttf')
face.set_char_size( 32*64 )
face.load_char('g')
slot = face.glyph
bitmap = face.glyph.bitmap
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
Z = make_image_surface(bitmap)
outline = slot.outline
points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
x, y = points['x'], points['y']
bbox = outline.get_cbox()
MARGIN = 10
scale = 3
def Floor64(x):
return (x//64) * 64
def Ceil64(x):
return ((x+63)//64) * 64
width_s = (width * 64)//scale + 2 * MARGIN
height_s = (rows * 64)//scale + 2 * MARGIN
surface = SVGSurface('glyph-vector-2-cairo.svg',
width_s,
height_s)
ctx = Context(surface)
ctx.set_source_rgb(1,1,1)
ctx.paint()
ctx.save()
ctx.scale(1.0/scale,1.0/scale)
ctx.translate(-Floor64(bbox.xMin) + MARGIN * scale,-Floor64(bbox.yMin) + MARGIN * scale)
ctx.transform(Matrix(1,0,0,-1))
ctx.translate(0, -(Ceil64(bbox.yMax) + Floor64(bbox.yMin))) # difference!
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if ( FT_Curve_Tag( tags[j] ) == FT_Curve_Tag_On ) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [MOVETO,]
tags.pop()
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([CURVE3, CURVE3])
elif ( len(segment) == 4 ) \
and ( FT_Curve_Tag(tags[1]) == FT_Curve_Tag_Cubic ) \
and ( FT_Curve_Tag(tags[2]) == FT_Curve_Tag_Cubic ):
verts.extend(segment[1:])
codes.extend([CURVE4, CURVE4, CURVE4])
else:
# Interpolating
verts.append(segment[1])
codes.append(CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ CURVE3, CURVE3])
verts.append(segment[-1])
codes.append(CURVE3)
[tags.pop() for x in range(len(segment) - 1)]
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
# Draw glyph
ctx.new_path()
ctx.set_source_rgba(0.8,0.5,0.8, 1)
i = 0
while (i < len(CODES)):
if (CODES[i] == MOVETO):
ctx.move_to(VERTS[i][0],VERTS[i][1])
i += 1
elif (CODES[i] == LINETO):
ctx.line_to(VERTS[i][0],VERTS[i][1])
i += 1
elif (CODES[i] == CURVE3):
ctx.curve_to(VERTS[i][0],VERTS[i][1],
VERTS[i+1][0],VERTS[i+1][1], # undocumented
VERTS[i+1][0],VERTS[i+1][1])
i += 2
elif (CODES[i] == CURVE4):
ctx.curve_to(VERTS[i][0],VERTS[i][1],
VERTS[i+1][0],VERTS[i+1][1],
VERTS[i+2][0],VERTS[i+2][1])
i += 3
ctx.fill_preserve()
ctx.set_source_rgb(0,0,0)
ctx.set_line_width(6)
ctx.stroke()
ctx.restore()
scale2 = (height_s - 2.0 * MARGIN)/rows
ctx.set_source_surface(Z, 0, 0)
pattern = ctx.get_source()
SurfacePattern.set_filter(pattern, FILTER_BEST)
scalematrix = Matrix()
scalematrix.scale(1.0/scale2, 1.0/scale2)
scalematrix.translate(-( width_s/2.0 - width *scale2 /2.0 ), -MARGIN)
pattern.set_matrix(scalematrix)
ctx.set_source_rgba (0, 0, 0, 0.7)
ctx.mask(pattern)
ctx.fill()
surface.flush()
surface.write_to_png("glyph-vector-2-cairo.png")
surface.finish()
Image.open("glyph-vector-2-cairo.png").show()
```
#### File: freetype-py/examples/subpixel-positioning.py
```python
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLUT as glut
from texture_font import TextureFont, TextureAtlas
from shader import Shader
vert='''
uniform sampler2D texture;
uniform vec2 pixel;
attribute float modulo;
varying float m;
void main() {
gl_FrontColor = gl_Color;
gl_TexCoord[0].xy = gl_MultiTexCoord0.xy;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
m = modulo;
}
'''
frag='''
uniform sampler2D texture;
uniform vec2 pixel;
varying float m;
void main() {
float gamma = 1.0;
vec2 uv = gl_TexCoord[0].xy;
vec4 current = texture2D(texture, uv);
vec4 previous= texture2D(texture, uv+vec2(-1,0)*pixel);
current = pow(current, vec4(1.0/gamma));
previous = pow(previous, vec4(1.0/gamma));
float r = current.r;
float g = current.g;
float b = current.b;
float a = current.a;
if( m <= 0.333 )
{
float z = m/0.333;
r = mix(current.r, previous.b, z);
g = mix(current.g, current.r, z);
b = mix(current.b, current.g, z);
}
else if( m <= 0.666 )
{
float z = (m-0.33)/0.333;
r = mix(previous.b, previous.g, z);
g = mix(current.r, previous.b, z);
b = mix(current.g, current.r, z);
}
else if( m < 1.0 )
{
float z = (m-0.66)/0.334;
r = mix(previous.g, previous.r, z);
g = mix(previous.b, previous.g, z);
b = mix(current.r, previous.b, z);
}
float t = max(max(r,g),b);
vec4 color = vec4(0.,0.,0., (r+g+b)/2.);
color = t*color + (1.-t)*vec4(r,g,b, min(min(r,g),b));
gl_FragColor = vec4( color.rgb, color.a);
}
'''
class Label:
def __init__(self, text, font, color=(1.0, 1.0, 1.0, 0.0), x=0, y=0,
width=None, height=None, anchor_x='left', anchor_y='baseline'):
self.text = text
self.vertices = np.zeros((len(text)*4,3), dtype=np.float32)
self.indices = np.zeros((len(text)*6, ), dtype=np.uint)
self.colors = np.zeros((len(text)*4,4), dtype=np.float32)
self.texcoords= np.zeros((len(text)*4,2), dtype=np.float32)
self.attrib = np.zeros((len(text)*4,1), dtype=np.float32)
pen = [x,y]
prev = None
for i,charcode in enumerate(text):
glyph = font[charcode]
kerning = glyph.get_kerning(prev)
x0 = pen[0] + glyph.offset[0] + kerning
dx = x0-int(x0)
x0 = int(x0)
y0 = pen[1] + glyph.offset[1]
x1 = x0 + glyph.size[0]
y1 = y0 - glyph.size[1]
u0 = glyph.texcoords[0]
v0 = glyph.texcoords[1]
u1 = glyph.texcoords[2]
v1 = glyph.texcoords[3]
index = i*4
indices = [index, index+1, index+2, index, index+2, index+3]
vertices = [[x0,y0,1],[x0,y1,1],[x1,y1,1], [x1,y0,1]]
texcoords = [[u0,v0],[u0,v1],[u1,v1], [u1,v0]]
colors = [color,]*4
self.vertices[i*4:i*4+4] = vertices
self.indices[i*6:i*6+6] = indices
self.texcoords[i*4:i*4+4] = texcoords
self.colors[i*4:i*4+4] = colors
self.attrib[i*4:i*4+4] = dx
pen[0] = pen[0]+glyph.advance[0]/64.0 + kerning
pen[1] = pen[1]+glyph.advance[1]/64.0
prev = charcode
width = pen[0]-glyph.advance[0]/64.0+glyph.size[0]
if anchor_y == 'top':
dy = -round(font.ascender)
elif anchor_y == 'center':
dy = +round(-font.height/2-font.descender)
elif anchor_y == 'bottom':
dy = -round(font.descender)
else:
dy = 0
if anchor_x == 'right':
dx = -width/1.0
elif anchor_x == 'center':
dx = -width/2.0
else:
dx = 0
self.vertices += (round(dx), round(dy), 0)
def draw(self):
gl.glEnable( gl.GL_TEXTURE_2D )
gl.glDisable( gl.GL_DEPTH_TEST )
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_COLOR_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glVertexPointer(3, gl.GL_FLOAT, 0, self.vertices)
gl.glColorPointer(4, gl.GL_FLOAT, 0, self.colors)
gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, self.texcoords)
r,g,b = 0,0,0
gl.glColor( 1, 1, 1, 1 )
gl.glEnable( gl.GL_BLEND )
#gl.glBlendFunc( gl.GL_CONSTANT_COLOR_EXT, gl.GL_ONE_MINUS_SRC_COLOR )
#gl.glBlendColor(r,g,b,1)
gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA )
gl.glBlendColor( 1, 1, 1, 1 )
gl.glEnableVertexAttribArray( 1 );
gl.glVertexAttribPointer( 1, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, self.attrib)
shader.bind()
shader.uniformi('texture', 0)
shader.uniformf('pixel', 1.0/512, 1.0/512)
gl.glDrawElements(gl.GL_TRIANGLES, len(self.indices),
gl.GL_UNSIGNED_INT, self.indices)
shader.unbind()
gl.glDisableVertexAttribArray( 1 );
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_COLOR_ARRAY)
gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glDisable( gl.GL_TEXTURE_2D )
gl.glDisable( gl.GL_BLEND )
if __name__ == '__main__':
import sys
atlas = TextureAtlas(512,512,3)
def on_display( ):
#gl.glClearColor(0,0,0,1)
gl.glClearColor(1,1,1,1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glBindTexture( gl.GL_TEXTURE_2D, atlas.texid )
for label in labels:
label.draw()
gl.glColor(0,0,0,1)
gl.glBegin(gl.GL_LINES)
gl.glVertex2i(15,0)
gl.glVertex2i(15, 330)
gl.glVertex2i(225, 0)
gl.glVertex2i(225, 330)
gl.glEnd()
glut.glutSwapBuffers( )
def on_reshape( width, height ):
gl.glViewport( 0, 0, width, height )
gl.glMatrixMode( gl.GL_PROJECTION )
gl.glLoadIdentity( )
gl.glOrtho( 0, width, 0, height, -1, 1 )
gl.glMatrixMode( gl.GL_MODELVIEW )
gl.glLoadIdentity( )
def on_keyboard( key, x, y ):
if key == '\033':
sys.exit( )
glut.glutInit( sys.argv )
glut.glutInitDisplayMode( glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH )
glut.glutCreateWindow( "Freetype OpenGL" )
glut.glutReshapeWindow( 240, 330 )
glut.glutDisplayFunc( on_display )
glut.glutReshapeFunc( on_reshape )
glut.glutKeyboardFunc( on_keyboard )
font = TextureFont(atlas, './Vera.ttf', 9)
text = "|... A Quick Brown Fox Jumps Over The Lazy Dog"
labels = []
x,y = 20,310
for i in range(30):
labels.append(Label(text=text, font=font, x=x, y=y))
x += 0.1000000000001
y -= 10
atlas.upload()
shader = Shader(vert,frag)
glut.glutMainLoop( )
```
#### File: freetype/ft_enums/ft_curve_tags.py
```python
FT_CURVE_TAGS = {
'FT_CURVE_TAG_ON' : 1,
'FT_CURVE_TAG_CONIC' : 0,
'FT_CURVE_TAG_CUBIC' : 2}
globals().update(FT_CURVE_TAGS)
FT_Curve_Tag_On = FT_CURVE_TAG_ON
FT_Curve_Tag_Conic = FT_CURVE_TAG_CONIC
FT_Curve_Tag_Cubic = FT_CURVE_TAG_CUBIC
def FT_CURVE_TAG( flag ):
return ( flag & 3 )
# FreeType itself does not have mixed-case macros
FT_Curve_Tag = FT_CURVE_TAG
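# Illustrative examples of the masking above (values follow from flag & 3):
#   FT_CURVE_TAG(1) == FT_CURVE_TAG_ON     -> on-curve point
#   FT_CURVE_TAG(0) == FT_CURVE_TAG_CONIC  -> conic (second-order) control point
#   FT_CURVE_TAG(2) == FT_CURVE_TAG_CUBIC  -> cubic (third-order) control point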
``` |
{
"source": "josh9730/circuit_migrations",
"score": 2
} |
#### File: maintenances/custom_napalm/junos.py
```python
import logging
import netaddr
from jnpr.junos.exception import RpcError
from napalm.junos.junos import JunOSDriver
from custom_napalm.utils import junos_cust_views
log = logging.getLogger(__file__)
class CustomJunOSDriver(JunOSDriver):
"""Extends base JunOSDriver for custom methods."""
def _rpc_get(self, rpc_input, msg):
"""Method for rpc get, return errors."""
try:
return rpc_input.get()
except RpcError as rpcerr:
log.error(f"Unable to retrieve {msg} information:")
log.error(str(rpcerr))
return {}
def _bgp_routes_format(self, route_dict: dict, destination: str) -> dict:
"""Takes dict created from returned tuple, and parses."""
prefix_length = route_dict.pop("prefix_length", 32)
destination = f"{destination}/{prefix_length}"
as_path = route_dict.get("as_path")
if as_path is not None: # return only AS Numbers
as_path = (
as_path.split(" I ")[0]
.replace("AS path:", "")
.replace("I", "")
.replace("\n Recorded", "")
.strip()
)
communities = route_dict.get("communities")
if communities is not None and type(communities) is not list:
communities = [communities]
return {
destination: {
"Next-Hop": route_dict["next_hop"],
"Local Preference": route_dict["local_preference"],
"AS-Path": as_path,
"MED": route_dict["metric"],
"Communities": communities,
}
}
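    # Worked example of the as_path clean-up above (raw value is illustrative):
    # "AS path: 2914 3356 I\n Recorded" reduces to "2914 3356" once the
    # "AS path:" prefix, the "I" origin marker, and the "Recorded" suffix are
    # stripped, leaving only the AS numbers.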
def get_isis_interfaces_custom(self) -> dict:
"""Via PyEZ, return dict of ISIS interfaces
RPC output:
isis_adjacencies = [(
PORT,
[
("isis_neighbor", system-id),
("isis_state", bool),
("isis_ipv6", bool),
("isis_nh", ipaddress),
],)]
        isis_interfaces = [
            (port, [("isis_metric", int)])
        ]
method return:
{{ Interface }}: {
isis_neighbor: system-id
isis_state: bool
            isis_nh: ipaddress
isis_ipv6: bool
isis_metric: int
}
"""
rpc_adj = junos_cust_views.ISISAdjacencyTable(self.device)
isis_adjacencies = self._rpc_get(rpc_adj, "IS-IS neighbors").items()
rpc_int = junos_cust_views.ISISInterfaceTable(self.device)
isis_interfaces = self._rpc_get(rpc_int, "IS-IS neighbors").items()
# convert int_results to dict
int_dict = {}
for interface in isis_interfaces:
int_dict.update({interface[0]: {elem[0]: elem[1] for elem in interface[1]}})
# create return dict
isis = {}
for neighbor in isis_adjacencies:
isis.update({neighbor[0]: {elem[0]: elem[1] for elem in neighbor[1]}})
isis[neighbor[0]].update(int_dict[neighbor[0]])
return isis
def get_mpls_interfaces_custom(self) -> dict:
"""Via PyEZ, return dict of MPLS-enabled interfaces.
RPC output:
[((PORT,), [("mpls_enabled", bool)])]
method return:
{{ Interface }}: {
mpls_enabled: bool
}
"""
rpc = junos_cust_views.MPLSInterfaceTable(self.device)
mpls_interfaces = self._rpc_get(rpc, "MPLS Interfaces").items()
# create return dict
mpls = {}
for port in mpls_interfaces:
mpls.update({port[0][0]: {elem[0]: elem[1] for elem in port[1]}})
return mpls
def get_msdp_neighbrs_custom(self) -> list:
"""Via PyEZ, return list of MSDP neighbors."""
rpc = junos_cust_views.MSDPNeighborTable(self.device)
return self._rpc_get(rpc, "MSDP Neighbors").keys()
def get_pim_neighbors_custom(self) -> list:
"""Via PyEZ, return list of PIM neighbors."""
rpc = junos_cust_views.PIMNeighborTable(self.device)
return self._rpc_get(rpc, "PIM Interfaces").keys()
def get_arp_table_custom(self) -> dict:
"""Via PyEZ, return dict of ARP table.
RPC return:
[(PORT, [('arp_nh', IPADDRESS), ('arp_nh_mac', MAC)])]
method return:
{{ Interface }}: {
arp_nh
arp_nh_mac
}
MAC is normalized using EUI format.
"""
rpc = junos_cust_views.ARPTable(self.device)
arp_table = self._rpc_get(rpc, "ARP").items()
arp = {}
for neighbor in arp_table:
arp.update({neighbor[0]: {elem[0]: elem[1] for elem in neighbor[1]}})
mac = arp[neighbor[0]]["arp_nh_mac"]
try:
arp[neighbor[0]]["arp_nh_mac"] = str(netaddr.EUI(mac)).replace("-", ":")
except netaddr.core.AddrFormatError:
pass
return arp
def get_nd_table_custom(self) -> dict:
"""Via PyEZ, return dict of ND table.
RPC return:
[(PORT, [('nd_nh', IPADDRESS), ('nd_nh_mac', MAC)])]
method return:
{{ Interface }}: {
nd_nh
nd_nh_mac
}
MAC is normalized using EUI format.
"""
rpc = junos_cust_views.NDTable(self.device)
nd_table = self._rpc_get(rpc, "IPv6 ND").items()
nd = {}
for neighbor in nd_table:
nd.update({neighbor[0]: {elem[0]: elem[1] for elem in neighbor[1]}})
mac = nd[neighbor[0]]["nd_nh_mac"]
try:
nd[neighbor[0]]["nd_nh_mac"] = str(netaddr.EUI(mac)).replace("-", ":")
except netaddr.core.AddrFormatError:
pass
return nd
def get_bgp_neighbors_detail_custom(self) -> dict:
"""Via PyEz, return custom BGP Neighbors Detail.
Differences between hardware/software versions. Default Napalm getter
expects peer-address, local-address, local-as, remote-as to be directly
under the 'bgp-peer' element. On some devices, those elements are instead
nested under 'bgp-peer-header'. This getter accounts for both.
The Napalm getter also aggregated the rib counts. This getter returns a
nested dict by routing table instead.
RPC output:
[(IPADDRESS,
[ ('up', bool),
('local_as', int),
('remote_as', int),
('local_as_2', int),
('remote_as_2', int),
('router_id', IPADDRESS),
('local_address', IPADDRESS),
('routing_table', str),
('import_policy', str),
( 'export_policy', str),
('rib', junos_bgp_rib_table:IPADDRESS: int items)])]
rib_table:
[ ( TABLE,
[ ('received_prefix_count', int),
('accepted_prefix_count', int),
('advertised_prefix_count', int)])
method return:
IPADDRESS: {
"up": bool,
"local_as": int,
"remote_as": int,
"router_id": IPADDRESS,
"local_address": IPADDRESS,
"routing_table": str,
"import_policy": str,
"export_policy": str,
TABLE: {
"received_prefix_count": int,
"accepted_prefix_count": int,
"advertised_prefix_count": int
}
},
"""
rpc = junos_cust_views.junos_bgp_neighbors_table(self.device)
neighbor_data = self._rpc_get(rpc, "BGP Neighbors").items()
bgp_detail = {}
for neighbor in neighbor_data:
neighbor_details = {elem[0]: elem[1] for elem in neighbor[1]}
# remove one of local_as or local_as_2, etc
for i in ["local_as", "remote_as"]:
if not neighbor_details[i]:
neighbor_details[i] = neighbor_details.pop(f"{i}_2")
else:
neighbor_details.pop(f"{i}_2")
# remove ports from address field if present
neighbor_details["local_address"] = neighbor_details["local_address"].split(
"+"
)[0]
# append rib tables, will return nested dicts of tables
_RIB_TABLES = [
"inet.0",
"inet6.0",
]
neighbors_rib = neighbor_details.pop("rib").items()
for rib_table in neighbors_rib:
if rib_table[0] in _RIB_TABLES: # prune non-unicast ribs
neighbor_details.update({rib_table[0]: dict(rib_table[1])})
bgp_detail.update({neighbor[0].split("+")[0]: neighbor_details})
return bgp_detail
def get_bgp_neighbor_routes_custom(self, peer: str) -> list:
"""Via PyEZ, return BGP neighbor information from direct neighbor.
Equivalent to:
show route receive-protobgp bgp {{ NEIGHBOR }} table {{ table }} extensive
RPC output:
[(DESTINATION,
[('prefix_length', int),
('next_hop', IPADDRESS),
('as_path', PATH),
('local_preference', int),
('communities', []),
('metric', int)])]
method return:
"BGP": {
PREFIX: {
"Next-Hop": IPADDRESS,
"Local Preference": int,
"AS-Path": "int",
"MED": int,
"Communities": []
}
}
"""
routes = {}
routes_table = junos_cust_views.junos_bgp_rx_route_table(self.device)
table_key = "" if netaddr.IPAddress(peer).version == 4 else "6"
kwargs = {"peer": peer, "table": f"inet{table_key}.0"}
try:
routes_table.get(**kwargs)
except RpcError as rpcerr:
log.error("Unable to retrieve BGP Rx Routes information:")
log.error(str(rpcerr))
routes_table = {}
for route in routes_table.items():
route_dict = {elem[0]: elem[1] for elem in route[1]}
routes.update(self._bgp_routes_format(route_dict, route[0]))
return routes
def get_route_to_custom(self, routes_list: list, neighbor="") -> dict:
"""Custom implementation of default 'get_route_to' getter. Returns less info,
specific to BGP. Eliminates the need for a parser. Much of this is from the
Napalm getter.
Accepts a list of destinations, returns a nested dict of results.
method return:
"BGP": {
PREFIX: {
"Next-Hop": IPADDRESS,
"Local Preference": int,
"AS-Path": "int",
"MED": int,
"Communities": []
}
}
"""
if not isinstance(routes_list, list):
raise TypeError("Please a valid list of destinations")
routes = {}
routes_table = junos_cust_views.junos_bgp_route_table(self.device)
for route in routes_list:
if route:
table_key = "" if netaddr.IPNetwork(route).version == 4 else "6"
try:
route_output = routes_table.get(
route, table=f"inet{table_key}.0"
).items()
                except RpcError as rpcerr:
                    log.error("Unable to retrieve BGP Rx Routes information:")
                    log.error(str(rpcerr))
                    continue
route_dict = {elem[0]: elem[1] for elem in route_output[0][1]}
destination = route_dict.pop("destination", "")
routes.update(self._bgp_routes_format(route_dict, destination))
if not routes:
routes = "No active BGP prefixes."
return routes
``` |
{
"source": "josh9730/mops",
"score": 3
} |
#### File: mops/utils/atlassian.py
```python
import keyring
from atlassian import Jira, Confluence
class Atlassian:
"""Base class for Jira & Confluence methods."""
def __init__(self):
jira_url = keyring.get_password("jira", "url")
confluence_url = keyring.get_password("confl", "url")
username = keyring.get_password("cas", "user")
password = keyring.get_password("cas", username)
self.jira = Jira(
url=jira_url,
username=username,
password=password,
)
self.confluence = Confluence(
url=confluence_url,
username=username,
password=password,
)
def jira_projects_list(self) -> list:
"""Return list of Jira Projects."""
projects = self.jira.projects(included_archived=None)
return [project["key"] for project in projects]
def jira_create_link(self, link_data: list) -> None:
"""Link Jira ticket to Confluence page.
The Jira macro supplied in the Confluence template only creates a
unidirectional link Confluence -> Jira. This method creates a link
Jira -> Confluence.
link_data:
ticket: str
link_title: url
page_title: str
"""
self.jira.create_or_update_issue_remote_links(
*link_data, relationship="mentioned in"
)
def confluence_create_or_update(self, page_data: tuple) -> None:
"""Create or Update Confluence page.
page_data: list in the form of:
parent_page_id: int
page_title: str
rendered_mop: str, in Confluence Wiki format
"""
self.confluence.update_or_create(*page_data, representation="wiki")
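# Hedged usage sketch (ticket key, URL, page id, and titles are placeholders,
# not values from the original repo):
#   atl = Atlassian()
#   atl.jira_create_link(["NOC-1234", "https://confluence.example.com/x/abc", "MOP Title"])
#   atl.confluence_create_or_update((123456, "MOP Title", rendered_mop))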
``` |
{
"source": "josh99/Cryptography_Algorithms",
"score": 3
} |
#### File: josh99/Cryptography_Algorithms/one-time-pad.py
```python
import numpy as np
alpha = "abcdefghijklmnopqrstuvwxyz"
ran_key = "<KEY>"
def rand_key(data):
new_key = ""
for el in data:
new_key += ran_key[alpha.index(el)]
return new_key
text = input("enter the text Message >>" )
print("key :",rand_key(text))
a = list(text)
b = list(rand_key(text))
c = ""
#encypt
for i in range(0,len(a)):
val_i = alpha.index(a[i])
val_k = alpha.index(b[i])
c +=alpha[int(val_i + val_k) % 26]
print("cypher text : ",c)
#decrypt
d=""
for i in range(0,len(c)):
val_i = alpha.index(c[i])
val_k = alpha.index(b[i])
d +=alpha[int(val_i - val_k) % 26]
print("decode text : ",d)
```
#### File: josh99/Cryptography_Algorithms/RSA_basic.py
```python
def gcd(a, b):
while b != 0:
c = a % b
a = b
b = c
return a
def modinv(a, m):
for x in range(1, m):
if (a * x) % m == 1:
return x
return None
def coprimes(a):
    l = []
    for x in range(2, a):
        if gcd(a, x) == 1 and modinv(x, a) is not None:
            l.append(x)
    for x in l[:]:  # iterate over a copy so removals do not skip elements
        if x == modinv(x, a):
            l.remove(x)
    return l
print("RSA implementation")
p = 11  # p and q must both be prime
q = 13
m = p*q
phi = (p-1)*(q-1)
e = coprimes(phi)[-1]  # has to be co-prime with phi
print("select e ")
print(coprimes(phi))
d = modinv(e, phi)  # private exponent: modular inverse of e mod phi
print("Key generated :"+str(d))
data = int(input("enter data to encrypt: "))  # message must be an integer smaller than m
#encrypt: c = data**e mod m
enc = pow(data, e, m)
print("encrypted data :"+str(enc))
#decrypt: data = c**d mod m
print("decrypted data :"+str(pow(enc, d, m)))
``` |
{
"source": "Joshaa1999/ReBench",
"score": 2
} |
#### File: ReBench/rebench/executor.py
```python
from __future__ import with_statement
from codecs import open as open_with_enc
from collections import deque
from math import floor
from multiprocessing import cpu_count
import os
import pkgutil
import random
import subprocess
import sys
from threading import Thread, RLock
from time import time
from . import subprocess_with_timeout as subprocess_timeout
from .interop.adapter import ExecutionDeliveredNoResults
from .ui import escape_braces
class FailedBuilding(Exception):
"""The exception to be raised when building of the executor or suite failed."""
def __init__(self, name, build_command):
super(FailedBuilding, self).__init__()
self._name = name
self._build_command = build_command
class RunScheduler(object):
def __init__(self, executor, ui):
self._executor = executor
self.ui = ui
self._runs_completed = 0
self._start_time = time()
self._total_num_runs = 0
@staticmethod
def _filter_out_completed_runs(runs, ui):
return [run for run in runs if not run.is_completed(ui)]
@staticmethod
def number_of_uncompleted_runs(runs, ui):
return len(RunScheduler._filter_out_completed_runs(runs, ui))
def _process_remaining_runs(self, runs):
"""Abstract, to be implemented"""
def _estimate_time_left(self):
if self._runs_completed == 0:
return 0, 0, 0
current = time()
time_per_invocation = ((current - self._start_time) / self._runs_completed)
etl = time_per_invocation * (self._total_num_runs - self._runs_completed)
sec = etl % 60
minute = (etl - sec) / 60 % 60
hour = (etl - sec - minute) / 60 / 60
return floor(hour), floor(minute), floor(sec)
def _indicate_progress(self, completed_task, run):
if not self.ui.spinner_initialized():
return
if completed_task:
self._runs_completed += 1
art_mean = run.get_mean_of_totals()
hour, minute, sec = self._estimate_time_left()
run_details = run.as_simple_string().replace(" None", "")
label = "Running Benchmarks: %70s\tmean: %10.1f\ttime left: %02d:%02d:%02d" \
% (run_details, art_mean, hour, minute, sec)
self.ui.step_spinner(self._runs_completed, label)
def indicate_build(self, run_id):
run_id_names = run_id.as_str_list()
self.ui.step_spinner(
self._runs_completed, "Run build for %s %s" % (run_id_names[1], run_id_names[2]))
def execute(self):
self._total_num_runs = len(self._executor.runs)
runs = self._filter_out_completed_runs(self._executor.runs, self.ui)
completed_runs = self._total_num_runs - len(runs)
self._runs_completed = completed_runs
with self.ui.init_spinner(self._total_num_runs):
self.ui.step_spinner(completed_runs)
self._process_remaining_runs(runs)
class BatchScheduler(RunScheduler):
def _process_remaining_runs(self, runs):
for run_id in runs:
try:
completed = False
while not completed:
completed = self._executor.execute_run(run_id)
self._indicate_progress(completed, run_id)
except FailedBuilding:
pass
class RoundRobinScheduler(RunScheduler):
def _process_remaining_runs(self, runs):
task_list = deque(runs)
while task_list:
try:
run = task_list.popleft()
completed = self._executor.execute_run(run)
self._indicate_progress(completed, run)
if not completed:
task_list.append(run)
except FailedBuilding:
pass
class RandomScheduler(RunScheduler):
def _process_remaining_runs(self, runs):
task_list = list(runs)
while task_list:
run = random.choice(task_list)
try:
completed = self._executor.execute_run(run)
self._indicate_progress(completed, run)
if completed:
task_list.remove(run)
except FailedBuilding:
task_list.remove(run)
class BenchmarkThread(Thread):
def __init__(self, par_scheduler, num):
Thread.__init__(self, name="BenchmarkThread %d" % num)
self._par_scheduler = par_scheduler
self._id = num
self.exception = None
def run(self):
try:
scheduler = self._par_scheduler.get_local_scheduler()
while True:
work = self._par_scheduler.acquire_work()
if work is None:
return
scheduler._process_remaining_runs(work)
except BaseException as exp:
self.exception = exp
class BenchmarkThreadExceptions(Exception):
def __init__(self, exceptions):
super(BenchmarkThreadExceptions, self).__init__()
self.exceptions = exceptions
class ParallelScheduler(RunScheduler):
def __init__(self, executor, seq_scheduler_class, ui):
RunScheduler.__init__(self, executor, ui)
self._seq_scheduler_class = seq_scheduler_class
self._lock = RLock()
self._num_worker_threads = self._number_of_threads()
self._remaining_work = None
self._worker_threads = None
def _number_of_threads(self):
# TODO: read the configuration elements!
non_interference_factor = float(2.5)
return int(floor(cpu_count() / non_interference_factor))
@staticmethod
def _split_runs(runs):
seq_runs = []
par_runs = []
for run in runs:
if run.execute_exclusively:
seq_runs.append(run)
else:
par_runs.append(run)
return seq_runs, par_runs
def _process_sequential_runs(self, runs):
seq_runs, par_runs = self._split_runs(runs)
scheduler = self._seq_scheduler_class(self._executor, self.ui)
scheduler._process_remaining_runs(seq_runs)
return par_runs
def _process_remaining_runs(self, runs):
self._remaining_work = self._process_sequential_runs(runs)
self._worker_threads = [BenchmarkThread(self, i)
for i in range(self._num_worker_threads)]
for thread in self._worker_threads:
thread.start()
exceptions = []
for thread in self._worker_threads:
thread.join()
if thread.exception is not None:
exceptions.append(thread.exception)
if exceptions:
if len(exceptions) == 1:
raise exceptions[0]
raise BenchmarkThreadExceptions(exceptions)
def _determine_num_work_items_to_take(self):
# use a simple and naive scheduling strategy that still allows for
# different running times, without causing too much scheduling overhead
k = len(self._remaining_work)
per_thread = int(floor(float(k) / float(self._num_worker_threads)))
per_thread = max(1, per_thread) # take at least 1 run
return per_thread
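    # Illustrative sketch of the strategy above: a call made while 10 runs remain
    # with 4 worker threads hands out floor(10 / 4) = 2 runs (never fewer than 1),
    # so threads re-balance as the shared queue drains.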
def get_local_scheduler(self):
return self._seq_scheduler_class(self._executor, self.ui)
def acquire_work(self):
with self._lock:
if not self._remaining_work:
return None
num = self._determine_num_work_items_to_take()
assert num <= len(self._remaining_work)
work = []
for _ in range(num):
work.append(self._remaining_work.pop())
return work
class Executor(object):
def __init__(self, runs, do_builds, ui, include_faulty=False,
debug=False, scheduler=BatchScheduler, build_log=None,
artifact_review=False, use_nice=False, use_shielding=False,
print_execution_plan=False):
self._runs = runs
self._use_nice = use_nice
self._use_shielding = use_shielding
self._print_execution_plan = print_execution_plan
self._do_builds = do_builds
self.ui = ui
self._include_faulty = include_faulty
self._debug = debug
self._scheduler = self._create_scheduler(scheduler)
self.build_log = build_log
self._artifact_review = artifact_review
num_runs = RunScheduler.number_of_uncompleted_runs(runs, ui)
for run in runs:
run.set_total_number_of_runs(num_runs)
def _create_scheduler(self, scheduler):
# figure out whether to use parallel scheduler
if cpu_count() > 1:
i = 0
for run in self._runs:
if not run.execute_exclusively:
i += 1
if i > 1:
return ParallelScheduler(self, scheduler, self.ui)
return scheduler(self, self.ui)
def _construct_cmdline(self, run_id, gauge_adapter):
cmdline = ""
use_denoise = self._use_nice or self._use_shielding
if use_denoise:
cmdline += "sudo rebench-denoise "
if not self._use_nice:
cmdline += "--without-nice "
if not self._use_shielding:
cmdline += "--without-shielding "
cmdline += "exec -- "
cmdline += gauge_adapter.acquire_command(run_id.cmdline())
return cmdline
def _build_executor_and_suite(self, run_id):
name = "E:" + run_id.benchmark.suite.executor.name
build = run_id.benchmark.suite.executor.build
self._process_builds(build, name, run_id)
name = "S:" + run_id.benchmark.suite.name
build = run_id.benchmark.suite.build
self._process_builds(build, name, run_id)
def _process_builds(self, builds, name, run_id):
if not builds:
return
for build in builds:
if build.is_built:
continue
if build.build_failed:
run_id.fail_immediately()
raise FailedBuilding(name, build)
self._execute_build_cmd(build, name, run_id)
def _execute_build_cmd(self, build_command, name, run_id):
path = build_command.location
if not path or path == ".":
path = os.getcwd()
script = build_command.command
self._scheduler.indicate_build(run_id)
self.ui.debug_output_info("Start build\n", None, script, path)
def _keep_alive(seconds):
self.ui.warning(
"Keep alive. current job runs since %dmin\n" % (seconds / 60), run_id, script, path)
try:
return_code, stdout_result, stderr_result = subprocess_timeout.run(
'/bin/sh', path, False, True,
stdin_input=str.encode(script),
keep_alive_output=_keep_alive)
except OSError as err:
build_command.mark_failed()
run_id.fail_immediately()
run_id.report_run_failed(
script, err.errno, "Build of " + name + " failed.")
if err.errno == 2:
msg = ("{ind}Build of %s failed.\n"
+ "{ind}{ind}It failed with: %s.\n"
+ "{ind}{ind}File name: %s\n") % (name, err.strerror, err.filename)
else:
msg = str(err)
self.ui.error(msg, run_id, script, path)
return
if self.build_log:
self.process_output(name, stdout_result, stderr_result)
if return_code != 0:
build_command.mark_failed()
run_id.fail_immediately()
run_id.report_run_failed(
script, return_code, "Build of " + name + " failed.")
self.ui.error("{ind}Build of " + name + " failed.\n", None, script, path)
if stdout_result and stdout_result.strip():
lines = escape_braces(stdout_result).split('\n')
self.ui.error("{ind}stdout:\n\n{ind}{ind}"
+ "\n{ind}{ind}".join(lines) + "\n")
if stderr_result and stderr_result.strip():
lines = escape_braces(stderr_result).split('\n')
self.ui.error("{ind}stderr:\n\n{ind}{ind}"
+ "\n{ind}{ind}".join(lines) + "\n")
raise FailedBuilding(name, build_command)
build_command.mark_succeeded()
def process_output(self, name, stdout_result, stderr_result):
with open_with_enc(self.build_log, 'a', encoding='utf-8') as log_file:
if stdout_result:
log_file.write(name + '|STD:')
log_file.write(stdout_result)
if stderr_result:
log_file.write(name + '|ERR:')
log_file.write(stderr_result)
def execute_run(self, run_id):
gauge_adapter = self._get_gauge_adapter_instance(
run_id.benchmark.gauge_adapter)
cmdline = self._construct_cmdline(run_id, gauge_adapter)
if self._print_execution_plan:
if run_id.location:
print("cd " + run_id.location)
print(cmdline)
return True
termination_check = run_id.get_termination_check(self.ui)
run_id.report_start_run()
terminate = self._check_termination_condition(run_id, termination_check, cmdline)
if not terminate and self._do_builds:
self._build_executor_and_suite(run_id)
# now start the actual execution
if not terminate:
terminate = self._generate_data_point(cmdline, gauge_adapter,
run_id, termination_check)
mean_of_totals = run_id.get_mean_of_totals()
if terminate:
run_id.report_run_completed(cmdline)
if (not run_id.is_failed and run_id.min_iteration_time
and mean_of_totals < run_id.min_iteration_time
and not self._artifact_review):
self.ui.warning(
("{ind}Warning: Low mean run time.\n"
+ "{ind}{ind}The mean (%.1f) is lower than min_iteration_time (%d)\n")
% (mean_of_totals, run_id.min_iteration_time), run_id, cmdline)
return terminate
def _get_gauge_adapter_instance(self, adapter_name):
adapter_name += "Adapter"
root = sys.modules['rebench.interop'].__path__
for _, name, _ in pkgutil.walk_packages(root):
            # depending on how ReBench was executed, the name might be one of the two
try:
mod = __import__("rebench.interop." + name, fromlist=adapter_name)
except ImportError:
try:
mod = __import__("interop." + name, fromlist=adapter_name)
except ImportError:
mod = None
if mod is not None and hasattr(mod, adapter_name):
return getattr(mod, adapter_name)(self._include_faulty)
return None
def _generate_data_point(self, cmdline, gauge_adapter, run_id,
termination_check):
# execute the external program here
try:
self.ui.debug_output_info("{ind}Starting run\n", run_id, cmdline)
def _keep_alive(seconds):
self.ui.warning(
"Keep alive. current job runs since %dmin\n" % (seconds / 60), run_id, cmdline)
(return_code, output, _) = subprocess_timeout.run(
cmdline, cwd=run_id.location, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True, verbose=self._debug,
timeout=run_id.max_invocation_time,
keep_alive_output=_keep_alive)
except OSError as err:
run_id.fail_immediately()
if err.errno == 2:
msg = ("{ind}Failed executing run\n"
+ "{ind}{ind}It failed with: %s.\n"
+ "{ind}{ind}File name: %s\n") % (err.strerror, err.filename)
else:
msg = str(err)
self.ui.error(msg, run_id, cmdline)
return True
if return_code == 127:
msg = ("{ind}Error: Could not execute %s.\n"
+ "{ind}{ind}The command was not found.\n"
+ "{ind}Return code: %d\n"
+ "{ind}{ind}%s.\n") % (
run_id.benchmark.suite.executor.name, return_code, output.strip())
self.ui.error(msg, run_id, cmdline)
return True
elif return_code != 0 and not self._include_faulty and not (
return_code == subprocess_timeout.E_TIMEOUT and run_id.ignore_timeouts):
run_id.indicate_failed_execution()
run_id.report_run_failed(cmdline, return_code, output)
if return_code == 126:
msg = ("{ind}Error: Could not execute %s.\n"
+ "{ind}{ind}The file may not be marked as executable.\n"
+ "{ind}Return code: %d\n") % (
run_id.benchmark.suite.executor.name, return_code)
elif return_code == subprocess_timeout.E_TIMEOUT:
msg = ("{ind}Run timed out.\n"
+ "{ind}{ind}Return code: %d\n"
+ "{ind}{ind}max_invocation_time: %s\n") % (
return_code, run_id.max_invocation_time)
else:
msg = "{ind}Run failed. Return code: %d\n" % return_code
self.ui.error(msg, run_id, cmdline)
if output and output.strip():
lines = escape_braces(output).split('\n')
self.ui.error("{ind}Output:\n\n{ind}{ind}"
+ "\n{ind}{ind}".join(lines) + "\n")
else:
self._eval_output(output, run_id, gauge_adapter, cmdline)
return self._check_termination_condition(run_id, termination_check, cmdline)
def _eval_output(self, output, run_id, gauge_adapter, cmdline):
try:
data_points = gauge_adapter.parse_data(output, run_id, run_id.completed_invocations + 1)
warmup = run_id.warmup_iterations
num_points_to_show = 20
num_points = len(data_points)
msg = "{ind}Completed invocation\n"
if num_points > num_points_to_show:
msg += "{ind}{ind}Recorded %d data points, show last 20...\n" % num_points
i = 0
for data_point in data_points:
if warmup is not None and warmup > 0:
warmup -= 1
run_id.add_data_point(data_point, True)
else:
run_id.add_data_point(data_point, False)
# only log the last num_points_to_show results
if i >= num_points - num_points_to_show:
msg += "{ind}{ind}%4d\t%s%s\n" % (
i + 1, data_point.get_total_value(), data_point.get_total_unit())
i += 1
run_id.indicate_successful_execution()
self.ui.verbose_output_info(msg, run_id, cmdline)
except ExecutionDeliveredNoResults:
run_id.indicate_failed_execution()
run_id.report_run_failed(cmdline, 0, output)
@staticmethod
def _check_termination_condition(run_id, termination_check, cmd):
return termination_check.should_terminate(
run_id.get_number_of_data_points(), cmd)
def execute(self):
try:
self._scheduler.execute()
if self._print_execution_plan:
return True
successful = True
for run in self._runs:
run.report_job_completed(self._runs)
if run.is_failed:
successful = False
return successful
finally:
for run in self._runs:
run.close_files()
@property
def runs(self):
return self._runs
```
#### File: rebench/model/exp_run_details.py
```python
from . import none_or_int, none_or_float, none_or_bool, remove_important, prefer_important
class ExpRunDetails(object):
@classmethod
def compile(cls, config, defaults):
invocations = prefer_important(config.get('invocations'), defaults.invocations)
iterations = prefer_important(config.get('iterations'), defaults.iterations)
warmup = prefer_important(config.get('warmup'), defaults.warmup)
min_iteration_time = none_or_int(config.get('min_iteration_time',
defaults.min_iteration_time))
max_invocation_time = none_or_int(config.get('max_invocation_time',
defaults.max_invocation_time))
ignore_timeouts = none_or_bool(config.get('ignore_timeouts',
defaults.ignore_timeouts))
parallel_interference_factor = none_or_float(config.get(
'parallel_interference_factor', defaults.parallel_interference_factor))
execute_exclusively = none_or_bool(config.get('execute_exclusively',
defaults.execute_exclusively))
retries_after_failure = none_or_int(config.get('retries_after_failure',
defaults.retries_after_failure))
return ExpRunDetails(invocations, iterations, warmup, min_iteration_time,
max_invocation_time, ignore_timeouts, parallel_interference_factor,
execute_exclusively, retries_after_failure,
defaults.invocations_override, defaults.iterations_override)
@classmethod
def empty(cls):
return ExpRunDetails(None, None, None, None, None, None, None, None, None, None, None)
@classmethod
def default(cls, invocations_override, iterations_override):
return ExpRunDetails(1, 1, None, 50, -1, None, None, True, 0,
invocations_override, iterations_override)
def __init__(self, invocations, iterations, warmup, min_iteration_time,
max_invocation_time, ignore_timeouts, parallel_interference_factor,
execute_exclusively, retries_after_failure,
invocations_override, iterations_override):
self.invocations = invocations
self.iterations = iterations
self.warmup = warmup
self.min_iteration_time = min_iteration_time
self.max_invocation_time = max_invocation_time
self.ignore_timeouts = ignore_timeouts
self.parallel_interference_factor = parallel_interference_factor
self.execute_exclusively = execute_exclusively
self.retries_after_failure = retries_after_failure
self.invocations_override = invocations_override
self.iterations_override = iterations_override
def resolve_override_and_important(self):
# resolve overrides
if self.invocations_override is not None:
self.invocations = self.invocations_override
if self.iterations_override is not None:
self.iterations = self.iterations_override
# resolve important tags
self.invocations = remove_important(self.invocations)
self.iterations = remove_important(self.iterations)
self.warmup = remove_important(self.warmup)
def as_dict(self):
return {
'warmup': self.warmup,
'minIterationTime': self.min_iteration_time,
'maxInvocationTime': self.max_invocation_time
}
```
#### File: rebench/model/measurement.py
```python
from .run_id import RunId
class Measurement(object):
def __init__(self, invocation, iteration, value, unit,
run_id, criterion='total', line_number=None, filename=None):
self.invocation = invocation
self.iteration = iteration
self.value = value
self.unit = unit
self.run_id = run_id
self.criterion = criterion
assert unit is not None
self.line_number = line_number
self.filename = filename
def is_total(self):
return self.criterion == 'total'
def as_str_list(self):
if isinstance(self.value, float):
val = "%f" % self.value
else:
val = "%s" % self.value
return [str(self.invocation), str(self.iteration),
val,
self.unit,
self.criterion] + self.run_id.as_str_list()
@classmethod
def from_str_list(cls, data_store, str_list, line_number=None, filename=None):
invocation = int(str_list[0])
iteration = int(str_list[1])
value = float(str_list[2])
unit = str_list[3]
criterion = str_list[4]
run_id = RunId.from_str_list(data_store, str_list[5:])
return Measurement(invocation, iteration, value, unit, run_id,
criterion, line_number, filename)
def as_dict(self):
return {
'c': self.criterion,
'in': self.invocation,
'it': self.iteration,
'u': self.unit,
'v': self.value
}
```
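A minimal usage sketch of the `Measurement` class above (assuming the package layout shown here is importable as `rebench.model.measurement`); `as_dict` never touches `run_id`, so `None` is enough for a quick check:
```python
from rebench.model.measurement import Measurement

# run_id is only needed when (de)serializing via as_str_list/from_str_list,
# so None suffices for inspecting the dict form
m = Measurement(invocation=1, iteration=3, value=42.5, unit="ms", run_id=None)
print(m.is_total())  # True, the default criterion is 'total'
print(m.as_dict())   # {'c': 'total', 'in': 1, 'it': 3, 'u': 'ms', 'v': 42.5}
```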
#### File: tests/features/issue_57_binary_on_path_test.py
```python
from ...configurator import Configurator, load_config
from ...executor import Executor
from ...persistence import DataStore
from ..rebench_test_case import ReBenchTestCase
class Issue57ExecutableOnPath(ReBenchTestCase):
def setUp(self):
super(Issue57ExecutableOnPath, self).setUp()
self._set_path(__file__)
def test_sleep_gives_results(self):
store = DataStore(self.ui)
cnf = Configurator(load_config(self._path + '/issue_57.conf'), store,
self.ui, data_file=self._tmp_file)
runs = list(cnf.get_runs())
runs = sorted(runs, key=lambda e: e.benchmark.name)
ex = Executor(runs, False, self.ui, False)
ex.execute()
self.assertEqual("Bench1", runs[0].benchmark.name)
self.assertEqual(10, runs[0].get_number_of_data_points())
``` |
{
"source": "joshabell/PyPortalTideGraph",
"score": 3
} |
#### File: joshabell/PyPortalTideGraph/tidechart.py
```python
import time
import displayio
import json
import adafruit_requests as requests
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.label import Label
from adafruit_display_shapes.line import Line
from adafruit_display_shapes.sparkline import Sparkline
TEXT_COLOR = 0x000CFF
NOW_COLOR = 0xFFE666
PLOT_COLOR = 0xFFFFFF
INTERVAL = 30 # minutes
ENTRIES = int((24 * 60) / INTERVAL)
class Tidechart:
def __init__(self, width, height, group, stationid):
self.width = width
self.height = height
self.stationid = stationid
self.day = -1
cwd = ("/"+__file__).rsplit('/', 1)[0]
self.text_font = bitmap_font.load_font(cwd+"/fonts/Arial-12.bdf")
        self.text_font.load_glyphs(b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
        self.headline = displayio.Group(max_size=2, x=0, y=0)
self.graph = displayio.Group(max_size=3, x=0, y=0)
self.banner = Label(self.text_font, text="Tide Info / Current Time: 00:00:00", x=7, y=14)
self.nowline = Line(0, 30, 0, self.height-30, color=NOW_COLOR)
self.plot = Sparkline(int(self.width), int(self.height - 50), x=0, y=25, max_items=ENTRIES, color=PLOT_COLOR)
self.headline.append(self.banner)
self.graph.append(self.nowline)
self.graph.append(self.plot)
group.append(self.headline)
group.append(self.graph)
@property
def name(self):
return 'tidechart'
def update(self):
current_time = time.localtime()
# Once a day, fetch new data
if (self.day != current_time.tm_mday):
data = self._get_data()
points = self._calc_points(data)
self.plot.clear_values()
for p in points:
self.plot.add_value(int(p))
self.day = current_time.tm_mday
self.banner.text = "Tide Info / Current Time: {:02d}:{:02d}:{:02d}".format(current_time.tm_hour, current_time.tm_min, current_time.tm_sec)
self.nowline.x = self._calc_now_x()
print(current_time)
def _get_data(self):
# Calculate the URL for tide information
current_time = time.localtime()
start_time = "{:04d}{:02d}{:02d}".format(current_time.tm_year, current_time.tm_mon, current_time.tm_mday)
URL = "https://api.tidesandcurrents.noaa.gov/api/prod/datagetter?product=predictions&application=NOS.COOPS.TAC.WL&datum=MLLW&time_zone=lst_ldt&units=english&interval=30&format=json&" + \
"begin_date=" + start_time + "&" + \
"range=24&" + \
"station=" + self.stationid
print("Fetching text from : ", URL)
r = requests.get(URL)
response = r.text
r.close()
return response
def _calc_points(self, json_data):
data = json.loads(json_data)
points = []
for s in data["predictions"]:
tide_height = int((float(s["v"]) * 10))
points.append(tide_height)
return points
def _calc_now_x(self):
current_time = time.localtime()
day_percent = ((current_time.tm_hour * 60) + current_time.tm_min) / (24*60)
x = int(self.width * day_percent)
return x
``` |
{
"source": "JoshAddington/dotfiles",
"score": 2
} |
#### File: pythonFiles/isort/settings.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import os
from collections import namedtuple
from .pie_slice import *
try:
import configparser
except ImportError:
import ConfigParser as configparser
MAX_CONFIG_SEARCH_DEPTH = 25 # The number of parent directories isort will look for a config file within
DEFAULT_SECTIONS = ("FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER")
WrapModes = ('GRID', 'VERTICAL', 'HANGING_INDENT', 'VERTICAL_HANGING_INDENT', 'VERTICAL_GRID', 'VERTICAL_GRID_GROUPED', 'NOQA')
WrapModes = namedtuple('WrapModes', WrapModes)(*range(len(WrapModes)))
# Note that these lists need not be complete, as they are simply fallbacks for when the included auto-detection fails.
default = {'force_to_top': [],
'skip': ['__init__.py', ],
'skip_glob': [],
'line_length': 79,
'wrap_length': 0,
'sections': DEFAULT_SECTIONS,
'known_future_library': ['__future__'],
'known_standard_library': ["abc", "anydbm", "argparse", "array", "asynchat", "asyncore", "atexit", "base64",
"BaseHTTPServer", "bisect", "bz2", "calendar", "cgitb", "cmd", "codecs",
"collections", "commands", "compileall", "ConfigParser", "contextlib", "Cookie",
"copy", "cPickle", "cProfile", "cStringIO", "csv", "datetime", "dbhash", "dbm",
"decimal", "difflib", "dircache", "dis", "doctest", "dumbdbm", "EasyDialogs",
"errno", "exceptions", "filecmp", "fileinput", "fnmatch", "fractions",
"functools", "gc", "gdbm", "getopt", "getpass", "gettext", "glob", "grp", "gzip",
"hashlib", "heapq", "hmac", "imaplib", "imp", "inspect", "io", "itertools", "json",
"linecache", "locale", "logging", "mailbox", "math", "mhlib", "mmap",
"multiprocessing", "operator", "optparse", "os", "pdb", "pickle", "pipes",
"pkgutil", "platform", "plistlib", "pprint", "profile", "pstats", "pwd", "pyclbr",
"pydoc", "Queue", "random", "re", "readline", "resource", "rlcompleter",
"robotparser", "sched", "select", "shelve", "shlex", "shutil", "signal",
"SimpleXMLRPCServer", "site", "sitecustomize", "smtpd", "smtplib", "socket",
"SocketServer", "sqlite3", "string", "StringIO", "struct", "subprocess", "sys",
"sysconfig", "tabnanny", "tarfile", "tempfile", "textwrap", "threading", "time",
"timeit", "trace", "traceback", "unittest", "urllib", "urllib2", "urlparse",
"usercustomize", "uuid", "warnings", "weakref", "webbrowser", "whichdb", "xml",
"xmlrpclib", "zipfile", "zipimport", "zlib", 'builtins', '__builtin__', 'thread',
"binascii", "statistics", "unicodedata", "fcntl"],
'known_third_party': ['google.appengine.api'],
'known_first_party': [],
'multi_line_output': WrapModes.GRID,
'forced_separate': [],
'indent': ' ' * 4,
'length_sort': False,
'add_imports': [],
'remove_imports': [],
'force_single_line': False,
'default_section': 'FIRSTPARTY',
'import_heading_future': '',
'import_heading_stdlib': '',
'import_heading_thirdparty': '',
'import_heading_firstparty': '',
'import_heading_localfolder': '',
'balanced_wrapping': False,
'use_parentheses': False,
'order_by_type': True,
'atomic': False,
'lines_after_imports': -1,
'lines_between_sections': 1,
'combine_as_imports': False,
'combine_star': False,
'include_trailing_comma': False,
'from_first': False,
'verbose': False,
'quiet': False,
'force_adds': False,
'force_alphabetical_sort': False,
'force_grid_wrap': False,
'force_sort_within_sections': False,
'show_diff': False}
@lru_cache()
def from_path(path):
computed_settings = default.copy()
_update_settings_with_config(path, '.editorconfig', '~/.editorconfig', ('*', '*.py', '**.py'), computed_settings)
_update_settings_with_config(path, '.isort.cfg', '~/.isort.cfg', ('settings', 'isort'), computed_settings)
_update_settings_with_config(path, 'setup.cfg', None, ('isort', ), computed_settings)
return computed_settings
def _update_settings_with_config(path, name, default, sections, computed_settings):
editor_config_file = default and os.path.expanduser(default)
tries = 0
current_directory = path
while current_directory and tries < MAX_CONFIG_SEARCH_DEPTH:
potential_path = os.path.join(current_directory, native_str(name))
if os.path.exists(potential_path):
editor_config_file = potential_path
break
new_directory = os.path.split(current_directory)[0]
if current_directory == new_directory:
break
current_directory = new_directory
tries += 1
if editor_config_file and os.path.exists(editor_config_file):
_update_with_config_file(editor_config_file, sections, computed_settings)
def _update_with_config_file(file_path, sections, computed_settings):
settings = _get_config_data(file_path, sections).copy()
if not settings:
return
if file_path.endswith(".editorconfig"):
indent_style = settings.pop('indent_style', "").strip()
indent_size = settings.pop('indent_size', "").strip()
if indent_style == "space":
computed_settings['indent'] = " " * (indent_size and int(indent_size) or 4)
elif indent_style == "tab":
computed_settings['indent'] = "\t" * (indent_size and int(indent_size) or 1)
max_line_length = settings.pop('max_line_length', "").strip()
if max_line_length:
computed_settings['line_length'] = int(max_line_length)
for key, value in itemsview(settings):
access_key = key.replace('not_', '').lower()
existing_value_type = type(default.get(access_key, ''))
if existing_value_type in (list, tuple):
            # sections has a fixed order of values; no adding or subtraction from any set
if access_key == 'sections':
computed_settings[access_key] = tuple(_as_list(value))
else:
existing_data = set(computed_settings.get(access_key, default.get(access_key)))
if key.startswith('not_'):
computed_settings[access_key] = list(existing_data.difference(_as_list(value)))
else:
computed_settings[access_key] = list(existing_data.union(_as_list(value)))
elif existing_value_type == bool and value.lower().strip() == "false":
computed_settings[access_key] = False
elif key.startswith('known_'):
computed_settings[access_key] = list(_as_list(value))
else:
computed_settings[access_key] = existing_value_type(value)
def _as_list(value):
return filter(bool, [item.strip() for item in value.split(",")])
@lru_cache()
def _get_config_data(file_path, sections):
with open(file_path, 'rU') as config_file:
if file_path.endswith(".editorconfig"):
line = "\n"
last_position = config_file.tell()
while line:
line = config_file.readline()
if "[" in line:
config_file.seek(last_position)
break
last_position = config_file.tell()
config = configparser.SafeConfigParser()
config.readfp(config_file)
settings = dict()
for section in sections:
if config.has_section(section):
settings.update(dict(config.items(section)))
return settings
return {}
def should_skip(filename, config):
"""Returns True if the file should be skipped based on the passed in settings."""
for skip_path in config['skip']:
if skip_path.endswith(filename):
return True
position = os.path.split(filename)
while position[1]:
if position[1] in config['skip']:
return True
position = os.path.split(position[0])
for glob in config['skip_glob']:
if fnmatch.fnmatch(filename, glob):
return True
return False
```
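A minimal sketch of how these settings are consumed (assuming the module is importable as `isort.settings`); `from_path` merges any `.editorconfig`, `.isort.cfg`, or `setup.cfg` found above the given path into the defaults:
```python
from isort import settings

config = settings.from_path(".")                    # defaults plus any discovered config files
print(config["line_length"], config["default_section"])
print(settings.should_skip("__init__.py", config))  # True: '__init__.py' is in the default skip list
```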
#### File: yapf/yapflib/split_penalty.py
```python
from lib2to3 import pytree
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
# TODO(morbo): Document the annotations in a centralized place. E.g., the
# README file.
UNBREAKABLE = 1000 * 1000
STRONGLY_CONNECTED = 2000
CONTIGUOUS_LIST = 500
NOT_TEST = 242
COMPARISON_EXPRESSION = 842
ARITHMETIC_EXPRESSION = 942
def ComputeSplitPenalties(tree):
"""Compute split penalties on tokens in the given parse tree.
Arguments:
tree: the top-level pytree node to annotate with penalties.
"""
_TreePenaltyAssigner().Visit(tree)
class _TreePenaltyAssigner(pytree_visitor.PyTreeVisitor):
"""Assigns split penalties to tokens, based on parse tree structure.
Split penalties are attached as annotations to tokens.
"""
  def Visit_import_as_names(self, node):  # pylint: disable=invalid-name
# import_as_names ::= import_as_name (',' import_as_name)* [',']
self.DefaultNodeVisit(node)
prev_child = None
for child in node.children:
if (prev_child and isinstance(prev_child, pytree.Leaf) and
prev_child.value == ','):
pytree_utils.SetNodeAnnotation(child,
pytree_utils.Annotation.SPLIT_PENALTY,
style.Get('SPLIT_PENALTY_IMPORT_NAMES'))
prev_child = child
def Visit_classdef(self, node): # pylint: disable=invalid-name
# classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite
#
# NAME
self._SetUnbreakable(node.children[1])
if len(node.children) > 4:
# opening '('
self._SetUnbreakable(node.children[2])
# ':'
self._SetUnbreakable(node.children[-2])
self.DefaultNodeVisit(node)
def Visit_funcdef(self, node): # pylint: disable=invalid-name
# funcdef ::= 'def' NAME parameters ['->' test] ':' suite
#
# Can't break before the function name and before the colon. The parameters
# are handled by child iteration.
colon_idx = 1
while pytree_utils.NodeName(node.children[colon_idx]) == 'simple_stmt':
colon_idx += 1
self._SetUnbreakable(node.children[colon_idx])
while colon_idx < len(node.children):
if (isinstance(node.children[colon_idx], pytree.Leaf) and
node.children[colon_idx].value == ':'):
break
colon_idx += 1
self._SetUnbreakable(node.children[colon_idx])
self.DefaultNodeVisit(node)
def Visit_lambdef(self, node): # pylint: disable=invalid-name
# lambdef ::= 'lambda' [varargslist] ':' test
# Loop over the lambda up to and including the colon.
self._SetUnbreakableOnChildren(node)
def Visit_parameters(self, node): # pylint: disable=invalid-name
# parameters ::= '(' [typedargslist] ')'
self.DefaultNodeVisit(node)
# Can't break before the opening paren of a parameter list.
self._SetUnbreakable(node.children[0])
self._SetStronglyConnected(node.children[-1])
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::= test [comp_for] | test '=' test # Really [keyword '='] test
self.DefaultNodeVisit(node)
index = 0
while index < len(node.children) - 1:
next_child = node.children[index + 1]
if isinstance(next_child, pytree.Leaf) and next_child.value == '=':
self._SetStronglyConnected(node.children[index + 1],
node.children[index + 2])
index += 1
def Visit_dotted_name(self, node): # pylint: disable=invalid-name
# dotted_name ::= NAME ('.' NAME)*
self._SetUnbreakableOnChildren(node)
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= ( (test ':' test
# (comp_for | (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
prev_child = None
for child in node.children:
self.Visit(child)
if pytree_utils.NodeName(child) == 'COLON':
# This is a key to a dictionary. We don't want to split the key if at
# all possible.
self._SetStronglyConnected(prev_child, child)
prev_child = child
def Visit_trailer(self, node): # pylint: disable=invalid-name
# trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
self.DefaultNodeVisit(node)
if node.children[0].value == '.':
self._SetUnbreakableOnChildren(node)
elif len(node.children) == 2:
# Don't split an empty argument list if at all possible.
self._SetStronglyConnected(node.children[1])
elif len(node.children) == 3:
if (pytree_utils.NodeName(node.children[1]) not in {'arglist', 'argument',
'term'}):
# Don't split an argument list with one element if at all possible.
self._SetStronglyConnected(node.children[1], node.children[2])
if pytree_utils.NodeName(node.children[-1]) == 'RSQB':
# Don't split the ending bracket of a subscript list.
self._SetStronglyConnected(node.children[-1])
def Visit_power(self, node): # pylint: disable=invalid-name,missing-docstring
# power ::= atom trailer* ['**' factor]
self.DefaultNodeVisit(node)
# When atom is followed by a trailer, we can not break between them.
# E.g. arr[idx] - no break allowed between 'arr' and '['.
if (len(node.children) > 1 and
pytree_utils.NodeName(node.children[1]) == 'trailer'):
# children[1] itself is a whole trailer: we don't want to
# mark all of it as unbreakable, only its first token: (, [ or .
self._SetUnbreakable(node.children[1].children[0])
# A special case when there are more trailers in the sequence. Given:
# atom tr1 tr2
# The last token of tr1 and the first token of tr2 comprise an unbreakable
# region. For example: foo.bar.baz(1)
# We can't put breaks between either of the '.', '(', or '[' and the names
# *preceding* them.
prev_trailer_idx = 1
while prev_trailer_idx < len(node.children) - 1:
cur_trailer_idx = prev_trailer_idx + 1
cur_trailer = node.children[cur_trailer_idx]
if pytree_utils.NodeName(cur_trailer) == 'trailer':
# Now we know we have two trailers one after the other
prev_trailer = node.children[prev_trailer_idx]
if prev_trailer.children[-1].value != ')':
# Set the previous node unbreakable if it's not a function call:
# atom tr1() tr2
# It may be necessary (though undesirable) to split up a previous
# function call's parentheses to the next line.
self._SetUnbreakable(prev_trailer.children[-1])
self._SetUnbreakable(cur_trailer.children[0])
prev_trailer_idx = cur_trailer_idx
else:
break
# We don't want to split before the last ')' of a function call. This also
# takes care of the special case of:
# atom tr1 tr2 ... trn
# where the 'tr#' are trailers that may end in a ')'.
for trailer in node.children[1:]:
if pytree_utils.NodeName(trailer) != 'trailer':
break
if trailer.children[0].value in '([':
if len(trailer.children) > 2:
self._SetUnbreakable(trailer.children[-1])
if _FirstChildNode(trailer).lineno == _LastChildNode(trailer).lineno:
# If the trailer was originally on one line, then try to keep it
# like that.
self._SetExpressionPenalty(trailer, CONTIGUOUS_LIST)
else:
# If the trailer's children are '()', then make it a strongly
# connected region. It's sometimes necessary, though undesirable, to
# split the two.
self._SetStronglyConnected(trailer.children[-1])
    # If the original source has "builder"-style calls, then we should allow
# the reformatter to retain that.
_AllowBuilderStyleCalls(node)
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
self._SetStronglyConnected(*node.children)
self.DefaultNodeVisit(node)
def Visit_comp_for(self, node): # pylint: disable=invalid-name
# comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
pytree_utils.SetNodeAnnotation(_FirstChildNode(node),
pytree_utils.Annotation.SPLIT_PENALTY, 0)
self._SetStronglyConnected(*node.children[1:])
self.DefaultNodeVisit(node)
def Visit_comp_if(self, node): # pylint: disable=invalid-name
# comp_if ::= 'if' old_test [comp_iter]
pytree_utils.SetNodeAnnotation(node.children[0],
pytree_utils.Annotation.SPLIT_PENALTY, 0)
self._SetStronglyConnected(*node.children[1:])
self.DefaultNodeVisit(node)
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
self.DefaultNodeVisit(node)
self._SetExpressionPenalty(node, NOT_TEST)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
self.DefaultNodeVisit(node)
self._SetExpressionPenalty(node, COMPARISON_EXPRESSION)
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
self.DefaultNodeVisit(node)
self._SetExpressionPenalty(node, ARITHMETIC_EXPRESSION)
def Visit_atom(self, node): # pylint: disable=invalid-name
# atom ::= ('(' [yield_expr|testlist_gexp] ')'
# '[' [listmaker] ']' |
# '{' [dictsetmaker] '}')
self.DefaultNodeVisit(node)
if node.children[0].value == '(':
if node.children[0].lineno == node.children[-1].lineno:
self._SetExpressionPenalty(node, CONTIGUOUS_LIST)
if node.children[-1].value == ')':
if pytree_utils.NodeName(node.parent) == 'if_stmt':
pytree_utils.SetNodeAnnotation(node.children[-1],
pytree_utils.Annotation.SPLIT_PENALTY,
UNBREAKABLE)
else:
pytree_utils.SetNodeAnnotation(node.children[-1],
pytree_utils.Annotation.SPLIT_PENALTY,
STRONGLY_CONNECTED)
elif node.children[0].value in '[{':
# Keep empty containers together if we can.
lbracket = node.children[0]
rbracket = node.children[-1]
if len(node.children) == 2:
self._SetUnbreakable(node.children[-1])
elif (rbracket.value in ']}' and
lbracket.get_lineno() == rbracket.get_lineno() and
rbracket.column - lbracket.column < style.Get('COLUMN_LIMIT')):
self._SetExpressionPenalty(node, CONTIGUOUS_LIST)
############################################################################
# Helper methods that set the annotations.
def _SetUnbreakable(self, node):
"""Set an UNBREAKABLE penalty annotation for the given node."""
_RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE)
def _SetStronglyConnected(self, *nodes):
"""Set a STRONGLY_CONNECTED penalty annotation for the given nodes."""
for node in nodes:
_RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY,
STRONGLY_CONNECTED)
def _SetUnbreakableOnChildren(self, node):
"""Set an UNBREAKABLE penalty annotation on children of node."""
for child in node.children:
self.Visit(child)
start = 2 if hasattr(node.children[0], 'is_pseudo') else 1
for i in py3compat.range(start, len(node.children)):
self._SetUnbreakable(node.children[i])
def _SetExpressionPenalty(self, node, penalty):
"""Set an ARITHMETIC_EXPRESSION penalty annotation children nodes."""
def RecArithmeticExpression(node, first_child_leaf):
if node is first_child_leaf:
return
if isinstance(node, pytree.Leaf):
if node.value in {'(', 'for', 'if'}:
return
penalty_annotation = pytree_utils.GetNodeAnnotation(
node,
pytree_utils.Annotation.SPLIT_PENALTY,
default=0)
if penalty_annotation < penalty:
pytree_utils.SetNodeAnnotation(
node, pytree_utils.Annotation.SPLIT_PENALTY, penalty)
else:
for child in node.children:
RecArithmeticExpression(child, first_child_leaf)
RecArithmeticExpression(node, _FirstChildNode(node))
def _RecAnnotate(tree, annotate_name, annotate_value):
"""Recursively set the given annotation on all leafs of the subtree.
Takes care to only increase the penalty. If the node already has a higher
or equal penalty associated with it, this is a no-op.
Args:
tree: subtree to annotate
annotate_name: name of the annotation to set
annotate_value: value of the annotation to set
"""
for child in tree.children:
_RecAnnotate(child, annotate_name, annotate_value)
if isinstance(tree, pytree.Leaf):
cur_annotate = pytree_utils.GetNodeAnnotation(tree,
annotate_name,
default=0)
if cur_annotate < annotate_value:
pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value)
def _AllowBuilderStyleCalls(node):
"""Allow splitting before '.' if it's a builder style function call."""
def RecGetLeaves(node):
if isinstance(node, pytree.Leaf):
return [node]
children = []
for child in node.children:
children += RecGetLeaves(child)
return children
list_of_children = RecGetLeaves(node)
prev_child = None
for child in list_of_children:
if child.value == '.':
if prev_child.lineno != child.lineno:
pytree_utils.SetNodeAnnotation(child,
pytree_utils.Annotation.SPLIT_PENALTY, 0)
prev_child = child
def _FirstChildNode(node):
if isinstance(node, pytree.Leaf):
return node
return _FirstChildNode(node.children[0])
def _LastChildNode(node):
if isinstance(node, pytree.Leaf):
return node
return _LastChildNode(node.children[-1])
``` |
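A minimal sketch of driving the module above (assuming upstream yapf's `pytree_utils.ParseCodeToTree` is available, as this copy already imports `pytree_utils`):
```python
from yapf.yapflib import pytree_utils, split_penalty

tree = pytree_utils.ParseCodeToTree("foo.bar.baz(1)\n")
split_penalty.ComputeSplitPenalties(tree)  # attaches SPLIT_PENALTY annotations to the leaves
```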
{
"source": "joshagoldstein/city-scrapers",
"score": 3
} |
#### File: city_scrapers/spiders/chi_ssa_17.py
```python
import re
from datetime import datetime
from city_scrapers_core.constants import CANCELLED, COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class ChiSsa17Spider(CityScrapersSpider):
name = "chi_ssa_17"
agency = "Chicago Special Service Area #17 Lakeview East"
timezone = "America/Chicago"
start_urls = ["https://lakevieweast.com/ssa-17/"]
location = {
"name": "Lakeview East Chamber of Commerce",
"address": "3208 N Sheffield Ave Chicago, IL 60657",
}
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
needs.
"""
meetings_header = [
h
for h in response.css(".post-content h3")
if "Meeting Dates" in h.extract()
][0]
minutes = self._parse_minutes(response)
meeting_list = meetings_header.xpath("following-sibling::ul")[0].css("li")
for item in meeting_list:
start = self._parse_start(
item, " ".join(meetings_header.css("*::text").extract())
)
meeting = Meeting(
title="SSA #17 Commission",
description="",
classification=COMMISSION,
start=start,
end=None,
time_notes="See agenda to confirm details",
all_day=False,
location=self.location,
links=minutes.get(start.date(), []),
source=response.url,
)
meeting["status"] = self._get_status(meeting)
if len(item.css("ul")):
meeting["status"] = CANCELLED
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_start(self, item, header):
"""Parse start datetime"""
dt_str = re.sub(
r"([\.\*,]|\s(?=[apm]{2}$))", "", item.xpath("./text()").extract_first()
).strip()
date_match = re.search(r"[A-Z][a-z]{2,8} \d{1,2}[a-z]*,?( \d{4})?", dt_str)
if not date_match:
return
date_str = re.sub(r"(?<=\d)[a-z]+", "", date_match.group())
if not re.search(r"\d{1,2} \d{4}", date_str):
year_str = re.search(r"\d{4}", header).group()
date_str += " " + year_str
time_match = re.search(r"\d{1,2}\:\d{2}[apm]{2}", dt_str)
if not time_match:
time_str = "10:00am"
else:
time_str = time_match.group()
return datetime.strptime(" ".join([date_str, time_str]), "%B %d %Y %I:%M%p")
def _parse_minutes(self, response):
"""Parse minutes from separate list"""
minutes_header = [
h
for h in response.css(".post-content h3")
if "Meeting Minutes" in h.extract()
][0]
minutes_dict = {}
minutes_list = minutes_header.xpath("following-sibling::ul")[0].css("a")
for minutes in minutes_list:
minutes_text = re.sub(
r"(?<=\d)[a-z]+", "", minutes.xpath("./text()").extract_first()
)
minutes_date = datetime.strptime(
", ".join(minutes_text.split(", ")[-2:]), "%B %d, %Y",
).date()
minutes_dict[minutes_date] = [
{"href": minutes.attrib["href"], "title": "Minutes"}
]
return minutes_dict
```
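A minimal sketch of running one of these spiders programmatically (hypothetical; city-scrapers spiders are normally launched with `scrapy crawl chi_ssa_17`):
```python
from scrapy.crawler import CrawlerProcess
from city_scrapers.spiders.chi_ssa_17 import ChiSsa17Spider

process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
process.crawl(ChiSsa17Spider)
process.start()  # fetches the SSA #17 page and yields Meeting items
```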
#### File: city_scrapers/spiders/chi_ssa_52.py
```python
import re
from datetime import datetime
from difflib import SequenceMatcher
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class ChiSsa52Spider(CityScrapersSpider):
name = "chi_ssa_52"
agency = "Chicago Special Service Area #52 51st Street"
timezone = "America/Chicago"
start_urls = ["https://www.51ststreetchicago.com/about.html"]
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
needs.
"""
items = response.css("div.paragraph")[3:4]
title = items.css("strong::text").get()
meeting = items.css("ul")[0]
item = (title, meeting)
for meet in meeting.css("li"):
meet = self._clean_meet(meet)
meeting = Meeting(
title=self._parse_title(title),
description=self._parse_description(item),
classification=self._parse_classification(item),
start=self._parse_start(meet),
end=self._parse_end(item),
all_day=self._parse_all_day(item),
time_notes=self._parse_time_notes(item),
location=self._parse_location(item),
links=self._parse_links(item),
source=self._parse_source(response),
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _clean_meet(self, meet):
"""Clean a meet datetime info and group the info"""
meet = meet.css("::text").get()
meet = meet.replace("\xa0", "")
clean_str = re.sub(r"[^\w:]+", " ", meet)
meet_info = clean_str.split()
return meet_info
def _check_am_pm(self, time):
time = time.split(":")
hour = time[0]
minutes = time[1]
if int(hour) >= 8 and int(hour) <= 12:
return f"{hour}:{minutes} AM"
return f"{hour}:{minutes} PM"
def _parse_title(self, item):
"""Parse or generate meeting title."""
return "Commission"
def _parse_description(self, item):
"""Parse or generate meeting description."""
return ""
def _parse_classification(self, item):
"""Parse or generate classification from allowed options."""
return COMMISSION
def _parse_start(self, item):
"""Parse start datetime as a naive datetime object."""
months = [
"JANUARY",
"FEBRUARY",
"MARCH",
"APRIL",
"MAY",
"JUNE",
"JULY",
"AUGUST",
"SEPTEMBER",
"OCTOBER",
"NOVEMBER",
"DECEMBER",
]
time = item[4]
time = self._check_am_pm(time)
try:
date = datetime.strptime(
f"{item[2]} {item[1]} {item[3]} {time}", "%d %B %Y %I:%M %p",
)
except ValueError:
for month in months:
ratio = SequenceMatcher(None, month, item[1]).ratio()
if ratio > 0.5:
date = datetime.strptime(
f"{item[2]} {month} {item[3]} {time}", "%d %B %Y %I:%M %p",
)
return date
def _parse_end(self, item):
"""Parse end datetime as a naive datetime object. Added by pipeline if None"""
return None
def _parse_time_notes(self, item):
"""Parse any additional notes on the timing of the meeting"""
return ""
def _parse_all_day(self, item):
"""Parse or generate all-day status. Defaults to False."""
return False
def _parse_location(self, item):
"""Parse or generate location."""
return {
"address": "220 E 51st St Chicago, IL 60615",
"name": "51st Street Business Association",
}
def _parse_links(self, item):
"""Parse or generate links."""
return []
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
```
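A quick check of the AM/PM heuristic above (hours 8 through 12 are read as morning, everything else as afternoon or evening); the import assumes the same package layout:
```python
from city_scrapers.spiders.chi_ssa_52 import ChiSsa52Spider

spider = ChiSsa52Spider()
print(spider._check_am_pm("10:00"))  # "10:00 AM"
print(spider._check_am_pm("2:30"))   # "2:30 PM"
```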
#### File: city_scrapers/spiders/chi_ssa_73.py
```python
import re
from datetime import datetime, time
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class ChiSsa73Spider(CityScrapersSpider):
name = "chi_ssa_73"
agency = "Chicago Special Service Area #73 Chinatown"
timezone = "America/Chicago"
start_urls = ["https://chinatownssa73.org/meeting-schedule/"]
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
needs.
"""
links_list = self._get_links(response)
location = self._parse_location(response)
ids_list = []
start_time = self._parse_time(response)
for item in response.css("article p"):
start = self._parse_start(item, start_time)
if not start:
continue
meeting = Meeting(
title="SSA #73 Chinatown Board",
description="",
classification=BOARD,
start=start,
end=None,
all_day=False,
time_notes="",
location=location,
links=self._parse_links(item, start, links_list),
source=response.url,
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
if meeting["id"] in ids_list:
continue
else:
ids_list.append(meeting["id"])
yield meeting
def _parse_start(self, item, start_time):
"""
Parse start date and time.
"""
date_str = item.css("*::text").extract_first()
if not date_str:
return
date_match = re.search(r"\w{3,9} \d{1,2}, \d{4}", date_str)
if date_match:
parsed_date = datetime.strptime(date_match.group(), "%B %d, %Y")
return datetime.combine(parsed_date.date(), start_time.time())
def _parse_time(self, response):
first_line = response.css("article p").extract_first()
time_match = re.search(r"\d{1,2}:\d{2} [ap]\.m", first_line)
if time_match:
temp_str = time_match.group()
temp_str = temp_str.replace(".", "")
temp_str = temp_str.upper()
return datetime.strptime(temp_str, "%I:%M %p")
else:
return time(18, 30)
def _parse_location(self, response):
"""
Parse or generate location.
"""
if "170<NAME>" in response.text:
return {
"address": "1700 S. Wentworth Avenue, Chicago, Illinois",
"name": "<NAME>",
}
elif "Zoom" in response.text:
return {
"address": "",
"name": "Zoom",
}
else:
raise ValueError("Meeting address has changed")
def _get_links(self, response):
links_list = []
for item in response.css("a"):
new_dict = {}
add_link = False
if "Agenda" in item.extract():
new_dict["title"] = "Agenda"
add_link = True
elif "Minutes" in item.extract():
new_dict["title"] = "Minutes"
add_link = True
if add_link:
new_dict["href"] = item.attrib["href"]
raw_ref = item.css("*::text").extract_first()
new_dict["date"] = raw_ref.split()[1]
links_list.append(new_dict)
return links_list
def _parse_links(self, item, start, links_list):
"""Parse or generate links."""
result_list = []
target_str_1 = start.strftime("%m-%d-%Y").replace(" 0", " ")
target_str_2 = start.strftime("%m-%d-%y").replace(" 0", " ")
for item in links_list:
if item["date"] in target_str_1 or item["date"] in target_str_2:
new_dict = {}
new_dict["href"] = item["href"]
new_dict["title"] = item["title"]
result_list.append(new_dict)
return result_list
```
#### File: city_scrapers/spiders/cook_board_ethics.py
```python
from datetime import datetime
import scrapy
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.relativedelta import relativedelta
class CookBoardEthicsSpider(CityScrapersSpider):
name = "cook_board_ethics"
agency = "Cook County Government Board of Ethics"
timezone = "America/Chicago"
allowed_domains = ["www.cookcountyil.gov"]
def start_requests(self):
today = datetime.now()
for month_delta in range(-2, 3):
mo_str = (today + relativedelta(months=month_delta)).strftime("%Y-%m")
url = (
f"https://www.cookcountyil.gov/calendar-node-field-date/month/{mo_str}"
)
yield scrapy.Request(url=url, method="GET", callback=self.parse)
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
needs.
"""
for url in self._get_event_urls(response):
yield scrapy.Request(url, callback=self._parse_event, dont_filter=True)
def _parse_event(self, response):
"""Parse the event page."""
title = self._parse_title(response)
meeting = Meeting(
title=title,
description=self._parse_description(response),
classification=BOARD,
start=self._parse_start(response),
end=self._parse_end(response),
time_notes="",
all_day=self._parse_all_day(response),
location=self._parse_location(response),
links=self._parse_links(response),
source=response.url,
)
meeting["id"] = self._get_id(meeting)
meeting["status"] = self._get_status(meeting)
return meeting
def _get_event_urls(self, response):
"""
Get urls for all Board of ethics meetings on the page.
"""
return [
response.urljoin(href)
for href in response.xpath('//a[contains(text(), "Board of Ethics")]')
.css("a::attr(href)")
.extract()
]
def _parse_location(self, response):
"""
Parse or generate location. Url, latitude and longitude are all
optional and may be more trouble than they're worth to collect.
"""
address = response.xpath(
'//div[@class="field event-location"]/descendant::*/text()'
).extract()
address = " ".join([w for w in address if w not in ["Location:", ", ", " "]])
return {
"address": address,
"name": "",
}
def _parse_all_day(self, response):
"""
Parse or generate all-day status. Defaults to false.
"""
date = response.xpath(
'//span[@class="date-display-single"]/descendant-or-self::*/text()'
).extract()
date = "".join(date).upper()
return "ALL DAY" in date
def _parse_title(self, response):
"""Parse or generate event title."""
title = response.xpath("//h1/text()").extract_first()
if "Special" in title:
return "Special Board of Ethics Meeting"
elif "Board of Ethics" in title:
return "Board of Ethics"
else:
return title
def _parse_description(self, response):
"""
Parse or generate event description.
"""
category_field = response.xpath(
"//div[contains(., 'Category:') and contains(@class, 'field-label')]"
)
field_items = category_field.xpath(
"./following::div[contains(@class, 'field-items')]"
)
return " ".join(
field_items.xpath(".//p/text()").extract()
+ field_items.xpath(".//strong/text()").extract()
).strip()
def _parse_start(self, response):
"""
Parse start date and time.
"""
start = response.xpath(
'//span[@class="date-display-single"]/descendant-or-self::*/text()'
).extract()
start = "".join(start).upper()
start = start.split(" TO ")[0].strip()
start = start.replace("(ALL DAY)", "12:00AM")
return datetime.strptime(start, "%B %d, %Y %I:%M%p")
def _parse_end(self, response):
"""
Parse end date and time.
"""
date = response.xpath(
'//span[@class="date-display-single"]/descendant-or-self::*/text()'
).extract()
date = "".join(date).upper()
date.replace("(ALL DAY)", "TO 11:59PM")
start_end = date.split(" TO ")
if len(start_end) < 2:
return
end_time = start_end[1]
date = start_end[0][: start_end[0].rindex(" ")]
return datetime.strptime("{} {}".format(date, end_time), "%B %d, %Y %I:%M%p")
def _parse_links(self, response):
return [
{
"href": response.urljoin(link.attrib["href"]),
"title": link.xpath("./text()").extract_first(),
}
for link in response.css("span.file a")
]
```
#### File: city_scrapers/spiders/cook_zoning.py
```python
import re
from datetime import datetime, time
from city_scrapers_core.constants import FORUM
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class CookZoningSpider(CityScrapersSpider):
name = "cook_zoning"
agency = "Cook County Zoning Board of Appeals"
timezone = "America/Chicago"
start_urls = ["https://www.cookcountyil.gov/agency/zoning-board-appeals-0"]
location = {"name": "Virtual", "address": ""}
# location = {
# "name": "County Administration Building",
# "address": "69 W Washington St 22nd Floor Conference Room, Chicago, IL 60602",
# }
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
needs.
"""
# self._validate_location(response)
hearing_list = response.css(".field-item ul")[1]
for item in hearing_list.css("li"):
item_text = " ".join(item.css("*::text").extract())
meeting = Meeting(
title="Public Hearing",
description="",
classification=FORUM,
start=self._parse_start(item_text),
end=None,
all_day=False,
time_notes="",
location=self.location,
links=self._parse_links(item),
source=response.url,
)
meeting["status"] = self._get_status(meeting, text=item_text)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_start(self, text):
"""Parse start datetime as a naive datetime object."""
date_match = re.search(r"[a-zA-Z]{3,10} \d{1,2},? \d{4}", text)
if not date_match:
return
date_str = date_match.group().replace(",", "")
date_obj = datetime.strptime(date_str, "%B %d %Y").date()
return datetime.combine(date_obj, time(13))
# def _validate_location(self, response):
# """Check if the meeting location has changed"""
# text = " ".join(response.css(".field-item p::text").extract())
# if "69 W" not in text:
# raise ValueError("Meeting location has changed")
def _parse_links(self, item):
"""Parse or generate links."""
links = []
for link in item.css("a"):
links.append({"title": "Agenda", "href": link.attrib["href"]})
return links
```
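A quick check of the date handling above (hearings default to 1:00 PM when no time is given); the sample text is made up:
```python
from city_scrapers.spiders.cook_zoning import CookZoningSpider

spider = CookZoningSpider()
print(spider._parse_start("Public hearing on March 4, 2020 - agenda attached"))
# datetime.datetime(2020, 3, 4, 13, 0)
```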
#### File: city_scrapers/spiders/il_arts_council.py
```python
import re
import unicodedata
from datetime import datetime as dt
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class IlArtsCouncilSpider(CityScrapersSpider):
name = "il_arts_council"
agency = "Illinois Arts Council"
timezone = "America/Chicago"
start_urls = ["http://www.arts.illinois.gov/about-iac/governance/council-meetings"]
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
needs.
"""
for table in response.xpath("//table/tbody"):
year = self._get_year(table)
for item in table.xpath("./tr")[1::]:
if "Date" in " ".join(item.css("td *::text").extract()):
continue
if len(item.css("td[colspan]")) > 0:
year = re.search(
r"\d{4}", " ".join(item.css("p *::text").extract())
).group()
continue
meeting = Meeting(
title=self._parse_title(item),
description=self._parse_description(item),
classification=self._parse_classification(item),
start=self._parse_start(item, year),
end=self._parse_end(item),
all_day=self._parse_all_day(item),
time_notes=self._parse_time_notes(item),
location=self._parse_location(item),
links=self._parse_links(item, response),
source=self._parse_source(response),
)
meeting_status = item.xpath("td[2]/text()").get() or ""
meeting["status"] = self._get_status(meeting, text=meeting_status)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_title(self, item):
"""Parse or generate meeting title."""
return "Agency Board"
def _parse_description(self, item):
"""Parse or generate meeting description."""
return ""
def _parse_classification(self, item):
"""Parse or generate classification from allowed options."""
return BOARD
def _parse_start(self, item, year):
"""Parse start datetime as a naive datetime object."""
item_text = " ".join(item.css("td:first-child *::text").extract())
text_clean = unicodedata.normalize("NFKD", item_text).strip()
if text_clean[-4::] == year:
start = dt.strptime(text_clean, "%A, %B %d, %Y")
elif text_clean[-2::] in ["am", "pm"]:
start = dt.strptime(text_clean + year, "%A, %B %d, %I:%M%p%Y")
else:
start = dt.strptime(text_clean + year, "%A, %B %d%Y")
return start
def _parse_end(self, item):
"""Parse end datetime as a naive datetime object. Added by pipeline if None"""
return None
def _parse_time_notes(self, item):
"""Parse any additional notes on the timing of the meeting"""
return ""
def _parse_all_day(self, item):
"""Parse or generate all-day status. Defaults to False."""
return False
def _parse_location(self, item):
"""Parse or generate location."""
location = item.xpath("td[2]/text()").get()
if not location:
location = item.xpath("td[2]/a/text()").get()
jrtc = {
"address": "100 West Randolph, Suite 10-500, Chicago, IL 60601",
"name": "IACA/JRTC",
}
tba = {
"address": "TBA",
"name": "TBA",
}
if not location or "TBA" in location:
return tba
elif "JRTC" in location:
return jrtc
else:
return {
"address": "",
"name": "",
}
def _parse_links(self, item, response):
"""Parse or generate links."""
agenda_link = item.xpath("td/a/@href").get()
if agenda_link:
title = agenda_link.split("/")[-1].replace("%20", " ")
if "www.arts.illinois.gov" not in agenda_link:
agenda_link = response.urljoin(agenda_link)
return [{"href": agenda_link, "title": title}]
return []
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
def _get_year(self, item):
"""Gets the year for the meeting."""
year_xpath = "../preceding-sibling::p/strong/text()"
year_text = item.xpath(year_xpath)[-1].get().strip()
return year_text[0:4]
```
#### File: city_scrapers/spiders/il_governors_state_university.py
```python
import re
from datetime import datetime
from city_scrapers_core.constants import BOARD, COMMITTEE
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class IlGovernorsStateUniversitySpider(CityScrapersSpider):
name = "il_governors_state_university"
agency = "Governors State University"
timezone = "America/Chicago"
start_urls = ["https://www.govst.edu/BOT-Meetings/"]
time_re = r"(?i)([01]?\d)(:?\d*)\s*([ap]\.?m\.?)"
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
"""
for year_section in response.xpath('//div[@class="toggle-list"]/ul/li'):
year_elt = year_section.xpath('div[@class="title"]/h3/text()')
# sometimes the year is not present in the table dates, so grab it from the
# section heading as backup
year = year_elt.get().replace("Meeting Dates for ", "").strip()
for row in year_section.xpath('div[@class="content"]/table/tbody/tr'):
item = row.xpath("td")
title = self._parse_title(item)
if title is None:
continue
meeting = Meeting(
title=title,
description=self._parse_description(item),
classification=self._parse_classification(title),
start=self._parse_start(item, year),
end=self._parse_end(item),
all_day=self._parse_all_day(item),
time_notes=self._parse_time_notes(item),
location=self._parse_location(item),
links=self._parse_links(item, response),
source=self._parse_source(response),
)
# if postponed or canceled appears in any of these columns, it means the
# meeting is canceled, so just pass in all the row text to _get_status
row_text = " ".join(row.css("* ::text").getall())
meeting["status"] = self._get_status(meeting, text=row_text)
meeting["id"] = self._get_id(meeting)
yield meeting
def _clean_igsu_title(self, title):
"""Reformat title to conform to project naming standards"""
if not title.startswith("Special"):
return re.sub(r"\s*Meeting\s*$", "", title)
return title
def _parse_title(self, item):
"""Parse or generate meeting title. The inner html of the first column varies
quite a bit - brs, divs, b tags - so figuring out what is the title based on
line position. Sometimes the "title" is only a date, so if all else fails,
return that.
Returns None if the title is 'Date', which indicates we're in a header row, or
if the title is empty, which indicates we're in a blank row.
If returning a string, strip 'Meeting' from the end."""
cell_text = item[0].css("* ::text").getall()
clean_cell_text = [elt.strip() for elt in cell_text if len(elt.strip()) > 0]
if (len(clean_cell_text) == 0) or ("date" == clean_cell_text[0].lower()):
return None
if len(clean_cell_text) == 1:
# then we either have no title or no date - or, occasionally, we have a
# comma-separated title and date. First check for \d\d\d\d under the
# assumption that this ends the date, and see if the remainder of the
# string is non-empty. Failing that, check if there are numbers,
# and if so assume it's a date and return Board of Trustees. Otherwise,
# return the line, assuming the whole thing is the title.
possible_title = clean_cell_text[0]
title_match = re.findall(r"\d\d\d\d\s+(.*)", possible_title)
if len(title_match) > 0:
return self._clean_igsu_title(title_match[0])
if re.search(r"\d", clean_cell_text[0]):
return "Board of Trustees"
return self._clean_igsu_title(clean_cell_text[0])
return self._clean_igsu_title(" ".join(clean_cell_text[1:]))
def _parse_description(self, item):
"""Parse or generate meeting description. Not available for this website."""
return ""
def _parse_classification(self, title):
"""Parse or generate classification from allowed options."""
if "committee" in title.lower():
return COMMITTEE
# if it isn't explicitly described as a committee meeting, then because this
# is a board calendar, all other meetings are board by default
return BOARD
def _normalize_date(self, date, default_year):
"""The dates appear in pretty variable formats, including in some cases without a year.
This method normalizes."""
clean_date = date.replace(",", "").replace(".", "").lower().strip()
# There was a stray "sept." in the data, although usually the month is
# fully spelled out. Use first three chars of the date string to get the month.
months = [
"january",
"february",
"march",
"april",
"may",
"june",
"july",
"august",
"september",
"october",
"november",
"december",
]
month_map = {m[:3]: m for m in months}
month, day, year = re.findall(
r"([a-z]+)\.?\s+(\d\d?),?\s*(\d\d\d\d)?", clean_date
)[0]
month = month_map[month[:3]]
year = year if len(year) == 4 else default_year
return f"{month} {day} {year}"
def _normalize_time(self, time_str):
"""Normalize time format. Sometimes it comes with colons or periods,
sometimes not"""
times = re.findall(self.time_re, time_str)
if len(times) == 0:
return None
hour, minute, ampm = times[0]
if len(minute.strip(":")) > 0:
minute = minute.strip(":")
else:
minute = "00"
ampm = ampm.replace(".", "")
return f"{hour}:{minute} {ampm}"
def _parse_start(self, item, default_year):
"""Parse start datetime as a naive datetime object."""
# try to find the date in the first column, and if it isn't there, fall back
# to the third
day = " ".join(item[0].css("* ::text").getall())
if not re.search(r"\d", day):
day = " ".join(item[2].css("* ::text").getall())
clean_day = self._normalize_date(day, default_year)
time = " ".join(item[1].css("* ::text").getall()).lower()
clean_time = self._normalize_time(time)
if clean_time is not None:
return datetime.strptime(f"{clean_day} {clean_time}", "%B %d %Y %I:%M %p")
# fall back to midnight if no time specified
return datetime.strptime(clean_day, "%B %d %Y")
def _parse_end(self, item):
"""Parse end datetime as a naive datetime object. Added by pipeline if None.
Not available for this website."""
return None
def _parse_time_notes(self, item):
"""Parse any additional notes on the timing of the meeting"""
return ""
def _parse_all_day(self, item):
"""Parse or generate all-day status. Defaults to False. Doesn't seem to occur
for this website, with the possible exception of the retreats, which aren't
quite all day"""
return False
def _parse_location(self, item):
"""Parse or generate location."""
unclean_location_cell_content = item[1].css("* ::text").getall()
# remove time if present, and clean
location_cell_content = []
for line in unclean_location_cell_content:
line = re.sub(self.time_re, "", line)
line = line.strip().strip("-").strip()
if len(line) > 0:
location_cell_content.append(line)
# It's not obvious whether the first line of the location_cell_content
# is a location name or address, so the rest of this method uses heuristics
# for this
default_name = "Governors State University"
default_address = "1 University Pkwy,\nUniversity Park, IL 60484"
name, address = default_name, default_address
# If the event was postponed or canceled, we will handle that in the
# event status, and can just use the defaults here
for elt in location_cell_content:
if ("postponed" in elt.lower()) or ("canceled" in elt.lower()):
return {"address": default_address, "name": default_name}
# If there is no name, just the address, we'll use the first line
# of the address as the name.
if len(location_cell_content) > 0:
name = location_cell_content[0]
# no obvious way to differentiate location names from addresses other than
# presence of numbers. We'll assume that the first line is title-only if it
# contains no numbers, otherwise that it begins the address.
if re.search(r"\d", name):
address = "\n".join(location_cell_content)
elif len(location_cell_content) > 1:
address = "\n".join(location_cell_content[1:])
# Room may end up in either the name or address; if it's present, we want to
# make sure it's part of the address. Sometimes room numbers appear without
# room, as a single word (see G330 in 2017) so handle them the same way
if "room " in address.lower():
address = address + "\n" + default_address
if "room " in name.lower():
if "room " not in address.lower():
address = name + "\n" + address
name = default_name
# special case for covid -- make sure zoom meetings don't show the university
# address!
if ("zoom" in name.lower()) or ("zoom" in address.lower()):
address = "Zoom"
name = "Zoom"
elif "location tbd" in name.lower():
address = name
# in some cases a one-word "address" like G330 in 2017 can make it through,
# so fall back to the default here as well
elif len(address.split()) == 1:
address = address + "\n" + default_address
return {
"address": address,
"name": name,
}
def _parse_links(self, item, response):
"""Parse or generate links."""
links = []
# the links to the agenda, if present, are in the third and fourth columns
for col in [2, 3]:
for link_parent in item[col].xpath("a"):
link_ext = link_parent.css("::attr(href)").get()
if link_ext is not None:
link = response.urljoin(link_ext)
title = link_parent.xpath("text()").get()
links.append({"href": link, "title": title})
return links
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
```
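A quick standalone check (not part of the spider) of the time-normalisation idea used in `_normalize_time` above: the regex tolerates missing minutes, optional spacing, and periods in "a.m./p.m.". The sample strings are made up.
```python
import re

# Same pattern as the spider's time_re attribute.
TIME_RE = r"(?i)([01]?\d)(:?\d*)\s*([ap]\.?m\.?)"

for raw in ["1 p.m.", "10:30am", "9 AM"]:
    hour, minute, ampm = re.findall(TIME_RE, raw)[0]
    minute = minute.strip(":") or "00"          # missing minutes default to "00"
    print(f"{hour}:{minute} {ampm.replace('.', '')}")
# 1:00 pm
# 10:30 am
# 9:00 AM
```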
#### File: city-scrapers/tests/test_chi_library.py
```python
from datetime import datetime
from os.path import dirname, join
from unittest.mock import MagicMock
import pytest
from city_scrapers_core.constants import BOARD, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_library import ChiLibrarySpider
freezer = freeze_time("2018-12-20")
freezer.start()
session = MagicMock()
res_mock = MagicMock()
res_mock.status_code = 200
session.get.return_value = res_mock
test_response = file_response(
join(dirname(__file__), "files", "chi_library.html"),
url="https://www.chipublib.org/board-of-directors/board-meeting-schedule/",
)
spider = ChiLibrarySpider(session=session)
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_title():
assert parsed_items[0]["title"] == "Board of Directors"
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2021, 1, 26, 9)
def test_id():
assert parsed_items[0]["id"] == "chi_library/202101260900/x/board_of_directors"
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_all_day():
assert parsed_items[0]["all_day"] is False
def test_location():
assert parsed_items[0]["location"] == {
"address": "",
"name": "Virtual",
}
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "https://www.chipublib.org/news/board-of-directors-meeting-agenda-january-26-2021/", # noqa
"title": "Agenda",
},
{
"href": "https://www.chipublib.org/news/board-of-directors-meeting-minutes-january-26-2021/", # noqa
"title": "Minutes",
},
]
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == BOARD
@pytest.mark.parametrize("item", parsed_items)
def test_end(item):
assert item["end"] is None
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
assert (
item["source"]
== "https://www.chipublib.org/board-of-directors/board-meeting-schedule/"
)
```
#### File: city-scrapers/tests/test_cook_pharmaceutical_disposal.py
```python
from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import ADVISORY_COMMITTEE, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.cook_pharmaceutical_disposal import (
CookPharmaceuticalDisposalSpider,
)
test_response = file_response(
join(dirname(__file__), "files", "cook_pharmaceutical_disposal.html"),
url="https://www.cookcountysheriff.org/rx/advisory-committee/",
)
spider = CookPharmaceuticalDisposalSpider()
freezer = freeze_time("2020-10-06")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_title():
assert (
parsed_items[0]["title"]
== "Safe Disposal of Pharmaceuticals Advisory Committee"
)
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2019, 12, 10, 13, 0)
def test_end():
assert parsed_items[0]["end"] is None
def test_time_notes():
assert parsed_items[0]["time_notes"] == "See agenda to confirm exact times"
def test_id():
assert (
parsed_items[0]["id"]
== "cook_pharmaceutical_disposal/201912101300/x/"
+ "safe_disposal_of_pharmaceuticals_advisory_committee"
)
def test_status():
assert parsed_items[0]["status"] == PASSED
def test_location():
assert parsed_items[0]["location"] == {
"address": "50 W Washington St, Room 407, Chicago, IL 60602",
"name": "Daley Center",
}
def test_source():
assert (
parsed_items[0]["source"]
== "https://www.cookcountysheriff.org/rx/advisory-committee/"
)
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "https://www.cookcountysheriff.org/wp-content/uploads/"
+ "2019/11/Dec.-10-2019-Advisory-Committee-Meeting-Agenda.pdf",
"title": "Dec. 10 2019 Advisory Committee Meeting Agenda",
}
]
def test_classification():
assert parsed_items[0]["classification"] == ADVISORY_COMMITTEE
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
``` |
{
"source": "joshainglis/cafeteria",
"score": 3
} |
#### File: cafeteria/datastructs/dict.py
```python
from copy import deepcopy
from json import loads, load, dumps
from os.path import isfile
from cafeteria.patterns.borg import Borg
from cafeteria.utilities import is_str
class AttributeDict(dict):
"""
A dictionary implementation that allows for all keys to be used as an
attribute. In this implementation we do proper get/setattr override here,
no self.__dict__ mambo jumbo.
"""
def __init__(self, *args, **kwargs):
super(AttributeDict, self).__init__(*args, **kwargs)
def __getattr__(self, item):
if item in self:
return self[item]
raise AttributeError("Could not get attr: '{}' from '{}'".format(item, self))
def __setattr__(self, key, value):
self[key] = value
class DeepAttributeDict(AttributeDict):
"""
A DeepAttributeDict is an AttributeDict of which dict objects at all depths
are converted to DeepAttributeDict.
"""
def __init__(self, *args, **kwargs):
super(DeepAttributeDict, self).__init__(*args, **kwargs)
self._deep_init()
def _deep_init(self):
for key, value in self.items():
if isinstance(value, dict) and not isinstance(value, AttributeDict):
self[key] = DeepAttributeDict(value)
class MergingDict(AttributeDict):
"""
A MergingDict is an AttributeDict whose attribute/item values are always
merged if the rvalue implements an update or append method. If the rvalue
is not merge-able, it is simply replaced.
"""
def replace(self, key, value):
"""
Convenience method provided as a way to replace a value mapped by a
        key. This is required since a MergingDict always merges via assignment
of item/attribute.
:param key: Attribute name or item key to replace rvalue for.
:type key: object
:param value: The new value to assign.
:type value: object
:return:
"""
super(MergingDict, self).__setitem__(key, value)
def update(self, other=None, **kwargs):
"""
A special update method to handle merging of dict objects. For all
other iterable objects, we use the parent class update method. For
other objects, we simply make use of the internal merging logic.
:param other: An iterable object.
:type other: dict or object
:param kwargs: key/value pairs to update.
:rtype: None
"""
if other is not None:
if isinstance(other, dict):
for key in other:
self[key] = other[key]
else:
# noinspection PyTypeChecker
super(MergingDict, self).update(other)
for key in kwargs:
self._merge(key, kwargs[key])
def _merge_method(self, key):
"""
Identify a merge compatible method available in self[key]. Currently we
support 'update' and 'append'.
:param key: Attribute name or item key
:return: Method name usable to merge a value into the instance mapped
by key
:rtype: str
"""
if key in self:
for method in ["update", "append"]:
if hasattr(self[key], method):
return method
return None
def _merge(self, key, value):
"""
Internal merge logic implementation to allow merging of values when
setting attributes/items.
:param key: Attribute name or item key
:type key: str
:param value: Value to set attribute/item as.
:type value: object
:rtype: None
"""
method = self._merge_method(key)
if method is not None:
# strings are special, update methods like set.update looks for
# iterables
if method == "update" and is_str(value):
value = [value]
if (
method == "append"
and isinstance(self[key], list)
and isinstance(value, list)
):
# if rvalue is a list and given object is a list, we expect all
# values to be appended
method = "extend"
getattr(self[key], method)(value)
else:
super(MergingDict, self).__setitem__(key, value)
def __setitem__(self, key, value):
self._merge(key, value)
def __setattr__(self, key, value):
self._merge(key, value)
class DeepMergingDict(MergingDict):
"""
A DeepMergingDict is a MergingDict of which dict objects at all depths are
converted to DeepMergingDicts.
"""
def __init__(self, *args, **kwargs):
super(DeepMergingDict, self).__init__(*args, **kwargs)
self._deep_init()
@staticmethod
def _should_cast(value):
return isinstance(value, dict) and not isinstance(value, MergingDict)
def _deep_init(self):
for key, value in self.items():
if self._should_cast(value):
self.replace(key, DeepMergingDict(value))
def replace(self, key, value):
if self._should_cast(value):
value = DeepMergingDict(value)
super(DeepMergingDict, self).replace(key, value)
def update(self, other=None, **kwargs):
if self._should_cast(other):
other = DeepMergingDict(other)
super(DeepMergingDict, self).update(other, **kwargs)
class BorgDict(Borg, dict):
"""
    A dict implementing the Borg Pattern. This can be extended via
inheritance. In this implementation the dict itself is not used. All
actions are mapped to the Borg shared state.
"""
def __init__(self, *args, **kwargs):
super(BorgDict, self).__init__()
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __delitem__(self, key):
delattr(self, key)
def __repr__(self):
return self.__dict__.__repr__()
def __str__(self):
return self.__dict__.__str__()
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def __contains__(self, k):
return self.__dict__.__contains__(k)
def keys(self):
return self.__dict__.keys()
def get(self, *args, **kwargs):
return self.__dict__.get(*args, **kwargs)
def pop(self, *args, **kwargs):
return self.__dict__.pop(*args, **kwargs)
class JSONAttributeDict(AttributeDict):
"""
:type source: str or dict or cafeteria.datastructs.dict.JSONAttributeDict
"""
def __init__(self, source):
super(JSONAttributeDict, self).__init__()
try:
self.update(loads(source) if is_str(source) else deepcopy(source))
except ValueError:
if isfile(source):
with open(source) as sf:
self.update(load(sf))
else:
raise ValueError(source)
@property
def pretty(self):
return dumps(self, indent=2)
def __str__(self):
return self.pretty
def __repr__(self):
return self.pretty
```
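A minimal usage sketch for the classes above, assuming the module is importable as `cafeteria.datastructs.dict` (the path shown in the file header). It contrasts plain attribute access on `AttributeDict` with the merge-on-assignment behaviour of `MergingDict`, and shows `replace()` as the escape hatch.
```python
from cafeteria.datastructs.dict import AttributeDict, MergingDict

ad = AttributeDict({"host": "localhost"})
ad.port = 8080                      # attribute access and item access are the same thing
assert ad["port"] == 8080

md = MergingDict({"tags": ["a"], "opts": {"x": 1}})
md.tags = ["b"]                     # list rvalue -> extended, not replaced
md.opts = {"y": 2}                  # dict rvalue -> merged via update()
assert md.tags == ["a", "b"]
assert md.opts == {"x": 1, "y": 2}

md.replace("tags", ["fresh"])       # replace() bypasses the merging logic
assert md.tags == ["fresh"]
```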
#### File: cafeteria/datastructs/memory.py
```python
from enum import Enum
from re import match
from cafeteria.compat import long
BYTES = 1
KB = 1024 * BYTES
MB = 1024 * KB
GB = 1024 * MB
TB = 1024 * GB
class MemoryUnit(Enum):
BYTES = BYTES
KB = KB
MB = MB
GB = GB
TB = TB
class Memory(long):
# noinspection PyInitNewSignature
def __new__(cls, x, unit=None):
if isinstance(x, str):
units_regex = "|".join(MemoryUnit.__members__.keys())
m = match(r"^(\d+) ?({})$".format(units_regex), x)
if m is None:
raise ValueError(
                    '{} requires an integer or a string in the format '
                    '"<value> ({})"'.format(cls.__name__, units_regex)
)
x = int(m.group(1)) * MemoryUnit.__members__.get(m.group(2)).value
elif unit is None:
raise ValueError("No unit provided.")
else:
x = x * unit.value
# noinspection PyTypeChecker
return super(Memory, cls).__new__(cls, x)
```
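A short usage sketch (import path assumed from the file header): `Memory` is an int subclass, so instances behave like plain byte counts once the string or value/unit pair has been normalised.
```python
from cafeteria.datastructs.memory import Memory, MemoryUnit

assert Memory("512 MB") == 512 * 1024 * 1024   # string form; the space is optional
assert Memory("2GB") == 2 * 1024 ** 3
assert Memory(4, MemoryUnit.KB) == 4096        # numeric value plus an explicit unit

try:
    Memory(4)                                  # bare number without a unit
except ValueError:
    pass                                       # "No unit provided."
```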
#### File: cafeteria/patterns/borg.py
```python
class BorgStateManager(object):
"""
A special State Manager for Borg classes and child classes. This is what
    makes it possible for child classes to maintain their own state, distinct
    from their parents, siblings, and children.
This itself implements the Borg pattern so that all its instances have a
shared state.
    Each class state is mapped to the hash of the class itself.
"""
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
@classmethod
def get_state(cls, clz):
"""
Retrieve the state of a given Class.
:param clz: types.ClassType
:return: Class state.
:rtype: dict
"""
if clz not in cls.__shared_state:
cls.__shared_state[clz] = (
clz.init_state() if hasattr(clz, "init_state") else {}
)
return cls.__shared_state[clz]
class Borg(object):
"""
A Borg pattern base class. Usable on its own or via inheritance. Uses
`cafeteria.patterns.borg.BorgStateManager` internally to achieve state
separation for children and grand children.
See http://code.activestate.com/recipes/66531-singleton-we-dont-need-no-stinkin-singleton-the-bo/ for more # noqa
information regarding the Borg Pattern.
"""
def __init__(self):
self.__dict__ = self._shared_state
@classmethod
def init_state(cls):
return {}
@property
def _shared_state(self):
return BorgStateManager.get_state(self.__class__)
``` |
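A brief sketch of the pattern in use. The `Config` and `PluginConfig` classes are hypothetical; the point is that instances of one class share a single state, while each subclass gets its own state through `BorgStateManager`.
```python
from cafeteria.patterns.borg import Borg

class Config(Borg):
    pass

class PluginConfig(Config):
    pass

a, b = Config(), Config()
a.debug = True
assert b.debug is True          # every Config instance shares the same __dict__

p = PluginConfig()
assert not hasattr(p, "debug")  # the subclass state is independent of the parent's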
{
"source": "joshainglis/github-commit-stats",
"score": 2
} |
#### File: alembic/versions/0011c7158340_add_files_table.py
```python
revision = '0011c7158340'
down_revision = 'f885a31cf49a'
branch_labels = None
depends_on = None
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from alembic import op
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('files',
sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('additions', sa.Integer(), nullable=False),
sa.Column('deletions', sa.Integer(), nullable=False),
sa.Column('status', sa.String(), nullable=True),
sa.Column('commit_id', postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(['commit_id'], ['commits.id'], name=op.f('fk_files_commit_id_commits')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_files'))
)
op.create_index('filename_index', 'files', ['filename'], unique=False)
op.create_index('status_index', 'files', ['status'], unique=False)
op.add_column('commits', sa.Column('additions', sa.Integer(), nullable=True))
op.add_column('commits', sa.Column('authored_at', sa.DateTime(), nullable=True))
op.add_column('commits', sa.Column('committed_at', sa.DateTime(), nullable=True))
op.add_column('commits', sa.Column('deletions', sa.Integer(), nullable=True))
op.create_index('authored_at_index', 'commits', ['authored_at'], unique=False)
op.create_index('committed_at_index', 'commits', ['committed_at'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('committed_at_index', table_name='commits')
op.drop_index('authored_at_index', table_name='commits')
op.drop_column('commits', 'deletions')
op.drop_column('commits', 'committed_at')
op.drop_column('commits', 'authored_at')
op.drop_column('commits', 'additions')
op.drop_index('status_index', table_name='files')
op.drop_index('filename_index', table_name='files')
op.drop_table('files')
### end Alembic commands ###
```
#### File: joshainglis/github-commit-stats/setup.py
```python
import os.path
from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import setup, find_packages
def extract_requirements(filename):
return [str(r.req) for r in parse_requirements(filename, session=PipSession())]
# load metadata
base_dir = os.path.dirname(__file__)
with open(os.path.join(base_dir, 'README.rst')) as f:
long_description = f.read()
install_requires = extract_requirements('requirements.txt')
tests_require = extract_requirements('requirements-test.txt')
setup(
name='ghstats',
version="0.0.1-dev",
url='https://github.com/joshainglis/github-commit-stats',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='',
long_description=long_description,
classifiers=[
'Intended Audience :: Developers',
'License :: MIT',
'Programming Language :: Python',
],
packages=find_packages(),
install_requires=install_requires,
scripts=['bin/ghstats'],
tests_require=tests_require,
)
``` |
{
"source": "joshainglis/mesa",
"score": 4
} |
#### File: examples/ConwaysGameOfLife/cgol_cell.py
```python
from mesa import Model, Agent
class CGoLCell(Agent):
'''Represents a single ALIVE or DEAD cell in the simulation.'''
DEAD = 0
ALIVE = 1
def __init__(self, pos, model, init_state):
'''
Create a cell, in the given state, at the given x, y position.
'''
Agent.__init__(self, pos, model)
self._x = pos[0]
self._y = pos[1]
self._state = init_state
self._nextState = None
def getX(self):
'''Return the x location of this cell.'''
return self._x
def getY(self):
'''Return the y location of this cell.'''
return self._y
def getState(self):
'''Return the current state (ALIVE or DEAD) of this cell.'''
return self._state
def step(self, model):
'''
Compute if the cell will be dead or alive at the next tick. This is
based on the number of alive or dead neighbors. The state is not
changed here, but is just computed and stored in self._nextState.
'''
# Get the neighbors and apply the rules on whether to be alive or dead
# at the next tick.
live_neighbors = 0
for n in model.grid.neighbor_iter( (self._x, self._y), True): # all 8 neighbors
if n.getState() == CGoLCell.ALIVE:
live_neighbors += 1
# Assume nextState is unchanged, unless changed below.
self._nextState = self._state
if self._state == CGoLCell.DEAD:
if live_neighbors == 3:
self._nextState = CGoLCell.ALIVE
else: # for when I am alive.
if live_neighbors < 2 or live_neighbors > 3:
self._nextState = CGoLCell.DEAD
# NOTE: we don't change our _state in this method because we need to
# iterate over all cells, checking their neighbors' _states before
# we change any of them.
def advance(self, model):
'''
Set the state to the new computed state -- computed in step().
'''
self._state = self._nextState
```
#### File: examples/Schelling/Schelling.py
```python
from __future__ import division # For Python 2.x compatibility
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import SingleGrid
from mesa.datacollection import DataCollector
from mesa.visualization.TextVisualization import (TextData, TextGrid,
TextVisualization)
class SchellingModel(Model):
'''
Model class for the Schelling segregation model.
'''
def __init__(self, height, width, density, minority_pc, homophily):
'''
'''
self.height = height
self.width = width
self.density = density
self.minority_pc = minority_pc
self.homophily = homophily
self.schedule = RandomActivation(self)
self.grid = SingleGrid(height, width, torus=True)
self.happy = 0
self.datacollector = DataCollector(
{"happy": lambda m: m.happy}, # Model-level count of happy agents
# For testing purposes, agent's individual x and y
{"x": lambda a: a.pos[0], "y": lambda a: a.pos[1]})
self.running = True
# Set up agents
# We use a grid iterator that returns
# the coordinates of a cell as well as
# its contents. (coord_iter)
for cell in self.grid.coord_iter():
x = cell[1]
y = cell[2]
if random.random() < self.density:
if random.random() < self.minority_pc:
agent_type = 1
else:
agent_type = 0
agent = SchellingAgent((x, y), agent_type)
self.grid.position_agent(agent, (x, y))
self.schedule.add(agent)
def step(self):
'''
Run one step of the model. If All agents are happy, halt the model.
'''
self.happy = 0 # Reset counter of happy agents
self.schedule.step()
self.datacollector.collect(self)
if self.happy == self.schedule.get_agent_count():
self.running = False
class SchellingAgent(Agent):
'''
Schelling segregation agent
'''
def __init__(self, pos, agent_type):
'''
Create a new Schelling agent.
Args:
unique_id: Unique identifier for the agent.
x, y: Agent initial location.
agent_type: Indicator for the agent's type (minority=1, majority=0)
'''
self.unique_id = pos
self.pos = pos
self.type = agent_type
def step(self, model):
similar = 0
for neighbor in model.grid.neighbor_iter(self.pos):
if neighbor.type == self.type:
similar += 1
# If unhappy, move:
if similar < model.homophily:
model.grid.move_to_empty(self)
else:
model.happy += 1
class SchellingTextVisualization(TextVisualization):
'''
ASCII visualization for schelling model
'''
def __init__(self, model):
'''
Create new Schelling ASCII visualization.
'''
self.model = model
grid_viz = TextGrid(self.model.grid, self.ascii_agent)
happy_viz = TextData(self.model, 'happy')
self.elements = [grid_viz, happy_viz]
@staticmethod
def ascii_agent(a):
'''
Minority agents are X, Majority are O.
'''
if a.type == 0:
return 'O'
if a.type == 1:
return 'X'
```
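A small headless driver for the model above (not part of the original file). It assumes the older Mesa API used throughout this example, where `agent.step(model)` receives the model and the scheduler exposes a `steps` counter.
```python
model = SchellingModel(height=20, width=20, density=0.8,
                       minority_pc=0.3, homophily=4)
while model.running and model.schedule.steps < 100:
    model.step()

# One row per step; "happy" is the count of agents that did not move.
print(model.datacollector.get_model_vars_dataframe().tail())
```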
#### File: mesa/mesa/space.py
```python
import itertools
import random
import math
RANDOM = -1
X = 0
Y = 1
def accept_tuple_argument(wrapped_function):
'''
Decorator to allow grid methods that take a list of (x, y) position tuples
to also handle a single position, by automatically wrapping tuple in
single-item list rather than forcing user to do it.
'''
def wrapper(*args):
if isinstance(args[1], tuple) and len(args[1]) == 2:
return wrapped_function(args[0], [args[1]])
else:
return wrapped_function(*args)
return wrapper
class Grid(object):
'''
Base class for a square grid.
    Grid cells are indexed by [y][x], where [0][0] is assumed to be the top-left
and [height-1][width-1] is the bottom-right. If a grid is toroidal, the top
and bottom, and left and right, edges wrap to each other
Properties:
width, height: The grid's width and height.
torus: Boolean which determines whether to treat the grid as a torus.
grid: Internal list-of-lists which holds the grid cells themselves.
Methods:
get_neighbors: Returns the objects surrounding a given cell.
get_neighborhood: Returns the cells surrounding a given cell.
get_cell_list_contents: Returns the contents of a list of cells
((x,y) tuples)
'''
def __init__(self, height, width, torus):
'''
Create a new grid.
Args:
height, width: The height and width of the grid
torus: Boolean whether the grid wraps or not.
'''
self.height = height
self.width = width
self.torus = torus
self.grid = []
for y in range(self.height):
row = []
for x in range(self.width):
row.append(self.default_val())
self.grid.append(row)
@staticmethod
def default_val():
"""
Default value for new cell elements.
"""
return None
def __getitem__(self, index):
return self.grid[index]
def __iter__(self):
# create an iterator that chains the
# rows of grid together as if one list:
return itertools.chain(*self.grid)
def coord_iter(self):
"""
An iterator that returns coordinates as well as cell contents.
"""
for row in range(self.height):
for col in range(self.width):
yield self.grid[row][col], col, row # agent, x, y
def neighbor_iter(self, pos, moore=True):
"""
Iterate over position neighbors.
Args:
pos: (x,y) coords tuple for the position to get the neighbors of.
moore: Boolean for whether to use Moore neighborhood (including
diagonals) or Von Neumann (only up/down/left/right).
"""
neighborhood = self.iter_neighborhood(pos, moore=moore)
return self.iter_cell_list_contents(neighborhood)
def iter_neighborhood(self, pos, moore,
include_center=False, radius=1):
"""
Return an iterator over cell coordinates that are in the
neighborhood of a certain point.
Args:
pos: Coordinate tuple for the neighborhood to get.
moore: If True, return Moore neighborhood
(including diagonals)
If False, return Von Neumann neighborhood
(exclude diagonals)
include_center: If True, return the (x, y) cell as well.
Otherwise, return surrounding cells only.
radius: radius, in cells, of neighborhood to get.
Returns:
A list of coordinate tuples representing the neighborhood;
With radius 1, at most 9 if
Moore, 5 if Von Neumann
(8 and 4 if not including the center).
"""
x, y = pos
coordinates = set()
for dy in range(-radius, radius + 1):
for dx in range(-radius, radius + 1):
if dx == 0 and dy == 0 and not include_center:
continue
# Skip diagonals in Von Neumann neighborhood.
if not moore and dy != 0 and dx != 0:
continue
# Skip diagonals in Moore neighborhood when distance > radius
if moore and radius > 1 and (dy ** 2 + dx ** 2) ** .5 > radius:
continue
# Skip if not a torus and new coords out of bounds.
if not self.torus and (not (0 <= dx + x < self.width) or
not (0 <= dy + y < self.height)):
continue
px = self.torus_adj(x + dx, self.width)
py = self.torus_adj(y + dy, self.height)
# Skip if new coords out of bounds.
if(self.out_of_bounds((px, py))):
continue
coords = (px, py)
if coords not in coordinates:
coordinates.add(coords)
yield coords
def get_neighborhood(self, pos, moore,
include_center=False, radius=1):
"""
Return a list of cells that are in the
neighborhood of a certain point.
Args:
pos: Coordinate tuple for the neighborhood to get.
moore: If True, return Moore neighborhood
(including diagonals)
If False, return Von Neumann neighborhood
(exclude diagonals)
include_center: If True, return the (x, y) cell as well.
Otherwise, return surrounding cells only.
radius: radius, in cells, of neighborhood to get.
Returns:
A list of coordinate tuples representing the neighborhood;
With radius 1, at most 9 if
Moore, 5 if Von Neumann
(8 and 4 if not including the center).
"""
return list(self.iter_neighborhood(pos, moore, include_center, radius))
def iter_neighbors(self, pos, moore,
include_center=False, radius=1):
"""
Return an iterator over neighbors to a certain point.
Args:
pos: Coordinates for the neighborhood to get.
moore: If True, return Moore neighborhood
(including diagonals)
If False, return Von Neumann neighborhood
(exclude diagonals)
include_center: If True, return the (x, y) cell as well.
Otherwise,
return surrounding cells only.
radius: radius, in cells, of neighborhood to get.
Returns:
An iterator of non-None objects in the given neighborhood;
at most 9 if Moore, 5 if Von-Neumann
(8 and 4 if not including the center).
"""
neighborhood = self.iter_neighborhood(
pos, moore, include_center, radius)
return self.iter_cell_list_contents(neighborhood)
def get_neighbors(self, pos, moore,
include_center=False, radius=1):
"""
Return a list of neighbors to a certain point.
Args:
pos: Coordinate tuple for the neighborhood to get.
moore: If True, return Moore neighborhood
(including diagonals)
If False, return Von Neumann neighborhood
(exclude diagonals)
include_center: If True, return the (x, y) cell as well.
Otherwise,
return surrounding cells only.
radius: radius, in cells, of neighborhood to get.
Returns:
A list of non-None objects in the given neighborhood;
at most 9 if Moore, 5 if Von-Neumann
(8 and 4 if not including the center).
"""
return list(self.iter_neighbors(
pos, moore, include_center, radius))
def torus_adj(self, coord, dim_len):
"""
Convert coordinate, handling torus looping.
"""
if self.torus:
coord %= dim_len
return coord
def out_of_bounds(self, pos):
"""
Is pos off the grid?
"""
x, y = pos
return x < 0 or x >= self.width or y < 0 or y >= self.height
@accept_tuple_argument
def iter_cell_list_contents(self, cell_list):
'''
Args:
cell_list: Array-like of (x, y) tuples, or single tuple.
Returns:
A iterator of the contents of the cells identified in cell_list
'''
return (
self[y][x] for x, y in cell_list if not self.is_cell_empty((x, y)))
@accept_tuple_argument
def get_cell_list_contents(self, cell_list):
'''
Args:
cell_list: Array-like of (x, y) tuples, or single tuple.
Returns:
A list of the contents of the cells identified in cell_list
'''
return list(self.iter_cell_list_contents(cell_list))
def move_agent(self, agent, pos):
'''
Move an agent from its current position to a new position.
Args:
agent: Agent object to move. Assumed to have its current location
stored in a 'pos' tuple.
pos: Tuple of new position to move the agent to.
'''
self._remove_agent(agent.pos, agent)
self._place_agent(pos, agent)
agent.pos = pos
def place_agent(self, agent, pos):
'''
Position an agent on the grid, and set its pos variable.
'''
self._place_agent(pos, agent)
agent.pos = pos
def _place_agent(self, pos, agent):
'''
Place the agent at the correct location.
'''
x, y = pos
self.grid[y][x] = agent
def _remove_agent(self, pos, agent):
'''
Remove the agent from the given location.
'''
x, y = pos
self.grid[y][x] = None
def is_cell_empty(self, pos):
x, y = pos
return True if self.grid[y][x] == self.default_val() else False
class SingleGrid(Grid):
'''
Grid where each cell contains exactly at most one object.
'''
empties = []
def __init__(self, height, width, torus):
'''
Create a new single-item grid.
Args:
height, width: The height and width of the grid
torus: Boolean whether the grid wraps or not.
'''
super().__init__(height, width, torus)
# Add all cells to the empties list.
self.empties = list(itertools.product(
*(range(self.width), range(self.height))))
def move_to_empty(self, agent):
"""
Moves agent to a random empty cell, vacating agent's old cell.
"""
pos = agent.pos
new_pos = self.find_empty()
if new_pos is None:
raise Exception("ERROR: No empty cells")
else:
self._place_agent(new_pos, agent)
agent.pos = new_pos
self._remove_agent(pos, agent)
def find_empty(self):
'''
Pick a random empty cell.
'''
if self.exists_empty_cells():
pos = random.choice(self.empties)
return pos
else:
return None
def exists_empty_cells(self):
"""
Return True if any cells empty else False.
"""
return len(self.empties) > 0
def position_agent(self, agent, x=RANDOM, y=RANDOM):
"""
Position an agent on the grid.
This is used when first placing agents! Use 'move_to_empty()'
when you want agents to jump to an empty cell.
Use 'swap_pos()' to swap agents positions.
If x or y are positive, they are used, but if RANDOM,
we get a random position.
Ensure this random position is not occupied (in Grid).
"""
if x == RANDOM or y == RANDOM:
coords = self.find_empty()
if coords is None:
raise Exception("ERROR: Grid full")
else:
coords = (x, y)
agent.pos = coords
self._place_agent(coords, agent)
def _place_agent(self, pos, agent):
if self.is_cell_empty(pos):
super()._place_agent(pos, agent)
self.empties.remove(pos)
else:
raise Exception("Cell not empty")
def _remove_agent(self, pos, agent):
super()._remove_agent(pos, agent)
self.empties.append(pos)
class MultiGrid(Grid):
'''
Grid where each cell can contain more than one object.
    Grid cells are indexed by [y][x], where [0][0] is assumed to be the top-left
and [height-1][width-1] is the bottom-right. If a grid is toroidal, the top
and bottom, and left and right, edges wrap to each other.
Each grid cell holds a set object.
Properties:
width, height: The grid's width and height.
torus: Boolean which determines whether to treat the grid as a torus.
grid: Internal list-of-lists which holds the grid cells themselves.
Methods:
get_neighbors: Returns the objects surrounding a given cell.
'''
@staticmethod
def default_val():
"""
Default value for new cell elements.
"""
return set()
def _place_agent(self, pos, agent):
'''
Place the agent at the correct location.
'''
x, y = pos
self.grid[y][x].add(agent)
def _remove_agent(self, pos, agent):
'''
Remove the agent from the given location.
'''
x, y = pos
self.grid[y][x].remove(agent)
@accept_tuple_argument
def iter_cell_list_contents(self, cell_list):
'''
Args:
cell_list: Array-like of (x, y) tuples, or single tuple.
Returns:
A iterator of the contents of the cells identified in cell_list
'''
return itertools.chain.from_iterable(
self[y][x] for x, y in cell_list if not self.is_cell_empty((x, y)))
class ContinuousSpace(object):
'''
Continuous space where each agent can have an arbitrary position.
Assumes that all agents are point objects, and have a pos property storing
their position as an (x, y) tuple. This class uses a MultiGrid internally
to store agent objects, to speed up neighborhood lookups.
'''
_grid = None
def __init__(self, x_max, y_max, torus, x_min=0, y_min=0,
grid_width=100, grid_height=100):
'''
Create a new continuous space.
Args:
x_max, y_max: Maximum x and y coordinates for the space.
torus: Boolean for whether the edges loop around.
x_min, y_min: (default 0) If provided, set the minimum x and y
coordinates for the space. Below them, values loop to
the other edge (if torus=True) or raise an exception.
grid_width, _height: (default 100) Determine the size of the
internal storage grid. More cells will slow
down movement, but speed up neighbor lookup.
Probably only fiddle with this if one or the
other is impacting your model's performance.
'''
self.x_min = x_min
self.x_max = x_max
self.width = x_max - x_min
self.y_min = y_min
self.y_max = y_max
self.height = y_max - y_min
self.torus = torus
self.cell_width = (self.x_max - self.x_min) / grid_width
self.cell_height = (self.y_max - self.y_min) / grid_height
self._grid = MultiGrid(grid_height, grid_width, torus)
def place_agent(self, agent, pos):
'''
Place a new agent in the space.
Args:
agent: Agent object to place.
pos: Coordinate tuple for where to place the agent.
'''
pos = self.torus_adj(pos)
self._place_agent(pos, agent)
agent.pos = pos
def move_agent(self, agent, pos):
'''
Move an agent from its current position to a new position.
Args:
agent: The agent object to move.
pos: Coordinate tuple to move the agent to.
'''
pos = self.torus_adj(pos)
self._remove_agent(agent.pos, agent)
self._place_agent(pos, agent)
agent.pos = pos
def _place_agent(self, pos, agent):
'''
Place an agent at a given point, and update the internal grid.
'''
cell = self._point_to_cell(pos)
self._grid._place_agent(cell, agent)
def _remove_agent(self, pos, agent):
'''
Remove an agent at a given point, and update the internal grid.
'''
cell = self._point_to_cell(pos)
self._grid._remove_agent(cell, agent)
def get_neighbors(self, pos, radius, include_center=True):
'''
Get all objects within a certain radius.
Args:
pos: (x,y) coordinate tuple to center the search at.
radius: Get all the objects within this distance of the center.
include_center: If True, include an object at the *exact* provided
coordinates. i.e. if you are searching for the
neighbors of a given agent, True will include that
agent in the results.
'''
# Get candidate objects
scale = max(self.cell_width, self.cell_height)
cell_radius = math.ceil(radius / scale)
cell_pos = self._point_to_cell(pos)
possible_objs = self._grid.get_neighbors(cell_pos,
True, True, cell_radius)
neighbors = []
# Iterate over candidates and check actual distance.
for obj in possible_objs:
dist = self.get_distance(pos, obj.pos)
if dist <= radius and (include_center or dist > 0):
neighbors.append(obj)
return neighbors
def get_distance(self, pos_1, pos_2):
'''
Get the distance between two point, accounting for toroidal space.
Args:
pos_1, pos_2: Coordinate tuples for both points.
'''
x1, y1 = pos_1
x2, y2 = pos_2
if not self.torus:
dx = x1 - x2
dy = y1 - y2
else:
d_x = abs(x1 - x2)
d_y = abs(y1 - y2)
dx = min(d_x, self.width - d_x)
dy = min(d_y, self.height - d_y)
return math.sqrt(dx ** 2 + dy ** 2)
def torus_adj(self, pos):
'''
Adjust coordinates to handle torus looping.
If the coordinate is out-of-bounds and the space is toroidal, return
the corresponding point within the space. If the space is not toroidal,
raise an exception.
Args:
pos: Coordinate tuple to convert.
'''
if not self.out_of_bounds(pos):
return pos
elif not self.torus:
raise Exception("Point out of bounds, and space non-toroidal.")
else:
x = self.x_min + ((pos[0] - self.x_min) % self.width)
y = self.y_min + ((pos[1] - self.y_min) % self.height)
return (x, y)
def _point_to_cell(self, pos):
'''
Get the cell coordinates that a given x,y point falls in.
'''
if self.out_of_bounds(pos):
raise Exception("Point out of bounds.")
x, y = pos
cell_x = math.floor((x - self.x_min) / self.cell_width)
cell_y = math.floor((y - self.y_min) / self.cell_height)
return (cell_x, cell_y)
def out_of_bounds(self, pos):
'''
Check if a point is out of bounds.
'''
x, y = pos
return (x < self.x_min or x > self.x_max or
y < self.y_min or y > self.y_max)
```
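A usage sketch for the grid classes above. The constructor argument order (`height, width, torus`) and the `mesa.space` import path follow this file; newer Mesa releases differ, so treat this as illustrative only.
```python
from mesa.space import MultiGrid, ContinuousSpace

class Dot:
    """Minimal stand-in agent: only a pos attribute is required."""
    def __init__(self):
        self.pos = None

grid = MultiGrid(3, 5, torus=True)              # height=3, width=5
a = Dot()
grid.place_agent(a, (0, 0))
# On a torus, (0, 0) is a Moore neighbour of (4, 2), so the lookup wraps around.
assert a in grid.get_neighbors((4, 2), moore=True)

space = ContinuousSpace(10, 10, torus=True)
# The wrapped distance between (0.5, 5) and (9.5, 5) crosses the edge.
assert space.get_distance((0.5, 5), (9.5, 5)) == 1.0
```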
#### File: mesa/tests/test_grid.py
```python
import unittest
from mesa.space import Grid, SingleGrid, MultiGrid
# Initial agent positions for testing
# X ---- >
TEST_GRID = [[0, 1, 0, 1, 0], # Y
[0, 1, 1, 0, 0], # |
[0, 0, 0, 1, 0]] # V
class MockAgent(object):
'''
Minimalistic agent for testing purposes.
'''
def __init__(self, unique_id, pos):
self.unique_id = unique_id
self.pos = pos
class TestBaseGrid(unittest.TestCase):
'''
Testing a non-toroidal grid.
'''
torus = False
def setUp(self):
'''
Create a test non-toroidal grid and populate it with Mock Agents
'''
self.grid = Grid(3, 5, self.torus)
self.agents = []
counter = 0
for y in range(3):
for x in range(5):
if TEST_GRID[y][x] == 0:
continue
counter += 1
# Create and place the mock agent
a = MockAgent(counter, None)
self.agents.append(a)
self.grid.place_agent(a, (x, y))
def test_agent_positions(self):
'''
Ensure that the agents are all placed properly.
'''
for agent in self.agents:
x, y = agent.pos
assert self.grid[y][x] == agent
def test_cell_agent_reporting(self):
'''
Ensure that if an agent is in a cell, get_cell_list_contents accurately
reports that fact.
'''
for agent in self.agents:
x, y = agent.pos
assert agent in self.grid.get_cell_list_contents([(x, y)])
def test_listfree_cell_agent_reporting(self):
'''
Ensure that if an agent is in a cell, get_cell_list_contents accurately
reports that fact, even when single position is not wrapped in a list.
'''
for agent in self.agents:
x, y = agent.pos
assert agent in self.grid.get_cell_list_contents((x, y))
def test_iter_cell_agent_reporting(self):
'''
Ensure that if an agent is in a cell, iter_cell_list_contents
accurately reports that fact.
'''
for agent in self.agents:
x, y = agent.pos
assert agent in self.grid.iter_cell_list_contents([(x, y)])
def test_listfree_iter_cell_agent_reporting(self):
'''
Ensure that if an agent is in a cell, iter_cell_list_contents
accurately reports that fact, even when single position is not
wrapped in a list.
'''
for agent in self.agents:
x, y = agent.pos
assert agent in self.grid.iter_cell_list_contents((x, y))
def test_neighbors(self):
'''
Test the base neighborhood methods on the non-toroid.
'''
neighborhood = self.grid.get_neighborhood((1, 1), moore=True)
assert len(neighborhood) == 8
neighborhood = self.grid.get_neighborhood((4, 1), moore=True)
assert len(neighborhood) == 5
neighborhood = self.grid.get_neighborhood((0, 0), moore=False)
assert len(neighborhood) == 2
neighbors = self.grid.get_neighbors((4, 1), moore=False)
assert len(neighbors) == 0
neighbors = self.grid.get_neighbors((4, 1), moore=True)
assert len(neighbors) == 2
neighbors = self.grid.get_neighbors((1, 1), moore=False,
include_center=True)
assert len(neighbors) == 3
neighbors = self.grid.get_neighbors((3, 1), moore=False, radius=2)
assert len(neighbors) == 4
def test_coord_iter(self):
ci = self.grid.coord_iter()
# no agent in first space
first = next(ci)
assert first[0] is None
assert first[1] == 0
assert first[2] == 0
# first agent in the second space
second = next(ci)
assert second[0].unique_id == 1
assert second[0].pos == (1, 0)
assert second[1] == 1
assert second[2] == 0
class TestBaseGridTorus(TestBaseGrid):
'''
Testing the toroidal base grid.
'''
torus = True
def test_neighbors(self):
'''
Test the toroidal neighborhood methods.
'''
neighborhood = self.grid.get_neighborhood((1, 1), moore=True)
assert len(neighborhood) == 8
neighborhood = self.grid.get_neighborhood((4, 1), moore=True)
assert len(neighborhood) == 8
neighborhood = self.grid.get_neighborhood((0, 0), moore=False)
assert len(neighborhood) == 4
neighbors = self.grid.get_neighbors((4, 1), moore=False)
assert len(neighbors) == 0
neighbors = self.grid.get_neighbors((4, 1), moore=True)
assert len(neighbors) == 2
neighbors = self.grid.get_neighbors((1, 1), moore=False,
include_center=True)
assert len(neighbors) == 3
neighbors = self.grid.get_neighbors((3, 1), moore=False, radius=2)
assert len(neighbors) == 4
class TestSingleGrid(unittest.TestCase):
'''
Test the SingleGrid object.
Since it inherits from Grid, all the functionality tested above should
work here too. Instead, this tests the enforcement.
'''
def setUp(self):
'''
        Create a test toroidal grid and populate it with Mock Agents
'''
self.grid = SingleGrid(3, 5, True)
self.agents = []
counter = 0
for y in range(3):
for x in range(5):
if TEST_GRID[y][x] == 0:
continue
counter += 1
# Create and place the mock agent
a = MockAgent(counter, None)
self.agents.append(a)
self.grid.place_agent(a, (x, y))
def test_enforcement(self):
'''
Test the SingleGrid empty count and enforcement.
'''
assert len(self.grid.empties) == 10
a = MockAgent(100, None)
with self.assertRaises(Exception):
self.grid._place_agent((1, 0), a)
# Place the agent in an empty cell
self.grid.position_agent(a)
assert a.pos not in self.grid.empties
assert len(self.grid.empties) == 9
for i in range(10):
self.grid.move_to_empty(a)
assert len(self.grid.empties) == 9
# Place agents until the grid is full
for i in range(9):
a = MockAgent(101 + i, None)
self.grid.position_agent(a)
assert len(self.grid.empties) == 0
a = MockAgent(110, None)
with self.assertRaises(Exception):
self.grid.position_agent(a)
with self.assertRaises(Exception):
            self.grid.move_to_empty(self.agents[0])
# Number of agents at each position for testing
TEST_MULTIGRID = [[0, 1, 0, 2, 0],
[0, 1, 5, 0, 0],
[0, 0, 0, 3, 0]]
class TestMultiGrid(unittest.TestCase):
'''
Testing a toroidal MultiGrid
'''
torus = True
def setUp(self):
'''
        Create a test toroidal grid and populate it with Mock Agents
'''
self.grid = MultiGrid(3, 5, self.torus)
self.agents = []
counter = 0
for y in range(3):
for x in range(5):
for i in range(TEST_MULTIGRID[y][x]):
counter += 1
# Create and place the mock agent
a = MockAgent(counter, None)
self.agents.append(a)
self.grid.place_agent(a, (x, y))
def test_agent_positions(self):
'''
Ensure that the agents are all placed properly on the MultiGrid.
'''
for agent in self.agents:
x, y = agent.pos
assert agent in self.grid[y][x]
def test_neighbors(self):
'''
Test the toroidal MultiGrid neighborhood methods.
'''
neighborhood = self.grid.get_neighborhood((1, 1), moore=True)
assert len(neighborhood) == 8
neighborhood = self.grid.get_neighborhood((4, 1), moore=True)
assert len(neighborhood) == 8
neighborhood = self.grid.get_neighborhood((0, 0), moore=False)
assert len(neighborhood) == 4
neighbors = self.grid.get_neighbors((4, 1), moore=False)
assert len(neighbors) == 0
neighbors = self.grid.get_neighbors((4, 1), moore=True)
assert len(neighbors) == 5
neighbors = self.grid.get_neighbors((1, 1), moore=False,
include_center=True)
assert len(neighbors) == 7
neighbors = self.grid.get_neighbors((3, 1), moore=False, radius=2)
assert len(neighbors) == 11
``` |
{
"source": "joshainglis/qikfiller",
"score": 2
} |
#### File: schemas/lists/client.py
```python
from marshmallow import fields, post_load
from qikfiller.schemas.lists import (
BaseCollectionObject, BaseCollectionSchema, BaseSchema, register_class,
)
from qikfiller.schemas.lists.task import TaskSchema
class ClientSchema(BaseSchema):
LOAD_INTO = 'Client'
id = fields.Integer(required=True)
name = fields.String(required=True)
owner_id = fields.Integer(allow_none=True)
owner_name = fields.String(allow_none=True)
custom_fields = fields.List(fields.String())
tasks = fields.Nested(TaskSchema, many=True)
@post_load
def to_obj(self, data):
try:
data["custom_fields"] = '|'.join(data["custom_fields"])
except KeyError:
pass
return super(ClientSchema, self).to_obj(data)
class ClientsSchema(BaseCollectionSchema):
LOAD_INTO = 'Clients'
clients = fields.Nested(ClientSchema, many=True)
@register_class
class Clients(BaseCollectionObject):
_SCHEMA = ClientsSchema
```
#### File: qikfiller/utils/date_time.py
```python
from datetime import date, datetime, time, timedelta
from dateutil.parser import parse
def parse_time(t):
if isinstance(t, time):
return t
if isinstance(t, datetime):
# noinspection PyArgumentList
return t.time()
if isinstance(t, int):
return time(t)
try:
return parse(t).time()
    except (TypeError, ValueError, OverflowError):
raise ValueError('Could not parse {} as a time'.format(t))
def to_timedelta(t):
return datetime.combine(date.min, parse_time(t)) - datetime.min
def parse_date(d):
if isinstance(d, date):
return d
if isinstance(d, datetime):
# noinspection PyArgumentList
return d.date()
try:
return parse(d, dayfirst=True)
except (ValueError, TypeError):
pass
if isinstance(d, int):
return date.today() + timedelta(days=d)
raise ValueError('Could not parse {} as a date'.format(d))
def get_start_end(date_, start, end, duration):
if start and not any([date_, end, duration]):
end = datetime.now().time()
date_ = parse_date(date_)
if start and end:
start = parse_time(start)
end = parse_time(end)
elif start and duration:
start = parse_time(start)
end = (datetime.combine(date_, start) + to_timedelta(parse_time(duration))).time()
elif end and duration:
end = parse_time(end)
start = (datetime.combine(date_, end) - to_timedelta(parse_time(duration))).time()
else:
raise ValueError("Please provide any two of start, end, duration")
if start > end:
raise ValueError('Start time {start} is after end time {end}.'.format(start=start, end=end))
return date_, start, end
```
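A usage sketch for the helpers above (import path assumed from the file header). Any two of start/end/duration pin down the third, and an integer date is treated as an offset in days from today.
```python
from datetime import date, time, timedelta
from qikfiller.utils.date_time import get_start_end, parse_date

day, start, end = get_start_end(0, "9:00", None, "1:30")
assert (start, end) == (time(9, 0), time(10, 30))
assert day == date.today()

assert parse_date(-1) == date.today() - timedelta(days=1)   # yesterday
```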
#### File: qikfiller/utils/fields.py
```python
import sys
from qikfiller.cache.orm import Task
def get_field(session, table, field):
if table is Task:
return get_task_field(session, field)
print("validating {table} from {task_id}:".format(table=table.__tablename__, task_id=field))
if not isinstance(field, int):
fields = session.query(table).filter(table.name.ilike('%{field}%'.format(field=field))).all()
if len(fields) == 0:
print(' Could not find any {table} matching "{field}"'.format(table=table.__tablename__, field=field))
sys.exit(1)
elif len(fields) > 1:
t = [('{field_.id}'.format(field_=field_), '{field_.name}'.format(field_=field_)) for field_ in fields]
l = [max(len(x[y]) for x in t) for y in range(len(t[0]))]
for type_ in t:
print(' {{0:>{0}s}}: {{1:<{1}s}}'.format(*l).format(*type_))
field = int(input(' Please enter the id of desired {table} from above: '
                             .format(table=table.__name__.lower())))
else:
field = fields[0].id
print(' Got {}'.format(session.query(table).get(field)))
return field
def get_task_field(session, task_id):
print("validating Task from {task_id}:".format(task_id=task_id))
if not isinstance(task_id, int):
task_id_split = task_id.split(':')
if task_id_split[-1]:
tasks = session.query(Task) \
.filter(Task.name.ilike('%{task_id_split}%'.format(task_id_split=task_id_split[-1]))).all()
else:
tasks = session.query(Task).all()
if len(task_id_split) == 2:
tasks = [task for task in tasks if task_id_split[0].lower() in task.get_client().name.lower()]
if len(tasks) == 0:
print(' Could not find any task matching "{task_id}"'.format(task_id=task_id))
sys.exit(1)
elif len(tasks) > 1:
t = [(
'{task_id}'.format(task_id=task.id),
'{client}'.format(client=task.get_client().name),
'{task_name}'.format(task_name=task.name)
) for task in tasks]
l = [max(len(x[y]) for x in t) for y in range(len(t[0]))]
for type_ in t:
print(' {{0:>{0}s}}: {{1:<{1}s}}: {{2:<{2}s}}'.format(*l).format(*type_))
task_id = int(input(' Please enter the id of desired task from above: '))
else:
task_id = tasks[0].id
print(' Got {}'.format(session.query(Task).get(task_id)))
return task_id
``` |
{
"source": "joshainglis/Web-Development-with-Sanic",
"score": 2
} |
#### File: loggingapp1/myapp/server.py
```python
from sanic import Sanic, text
from myapp.common.log import app_logger, setup_logging
def create_app():
app = Sanic(__name__)
setup_logging(app)
@app.route("")
async def dummy(_):
app_logger.debug("This is a DEBUG message")
app_logger.info("This is a INFO message")
app_logger.warning("This is a WARNING message")
app_logger.error("This is a ERROR message")
app_logger.critical("This is a CRITICAL message")
return text("")
return app
```
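A possible local entry point for the factory above (not in the original file); the hard-coded host and port are placeholders. The same factory can also be served from the CLI with `sanic myapp.server:create_app --factory`.
```python
if __name__ == "__main__":
    app = create_app()
    app.run(host="127.0.0.1", port=8000, debug=True)
```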
#### File: booktracker/worker/redis.py
```python
import aioredis
from sanic import Sanic
app = Sanic.get_app("BooktrackerApp")
@app.before_server_start
async def setup_redis(app, _):
app.ctx.redis_pool = aioredis.BlockingConnectionPool.from_url(
app.config.REDIS_DSN, max_connections=app.config.REDIS_MAX
)
app.ctx.redis = aioredis.Redis(connection_pool=app.ctx.redis_pool)
@app.after_server_stop
async def shutdown_redis(app, _):
await app.ctx.redis_pool.disconnect()
``` |
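A hypothetical route (not part of the book's file) showing how a handler would reach the shared client attached by the listener above; the URL and key name are made up.
```python
from sanic import Sanic, json

app = Sanic.get_app("BooktrackerApp")

@app.get("/books/<book_id>/views")
async def book_views(request, book_id):
    # A single round trip: INCR returns the new counter value.
    views = await request.app.ctx.redis.incr(f"views:{book_id}")
    return json({"book_id": book_id, "views": int(views)})
```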
{
"source": "joshakpeko/filetodict",
"score": 3
} |
#### File: joshakpeko/filetodict/ftodconv.py
```python
import json
def ftodict(ifile):
"""save ifile content into a dictionary object and return that
dictionary."""
dico = {}
with open(ifile, 'r', encoding='utf8') as src:
term_elmt = [] # term elements : term, explanation, examples
for line in src:
if line != '\n':
term_elmt.append(line)
elif term_elmt:
# complete with None if some elements are missing
while len(term_elmt) < 3:
term_elmt.append(None)
term, explan, examp = term_elmt
try:
term = term.strip()
except AttributeError: # in case it is None
pass
try:
explan = explan.strip()
except AttributeError:
pass
try:
examp = examp.replace(' and', ',')
examp = examp.strip('Examples:')
examp = [ex.strip() for ex in examp.split(',')]
except AttributeError:
pass
dico[term] = {
'explanation': explan,
'examples': examp
}
term_elmt = [] # reset for next sequence
return dico
# convert animegenres.txt to dictionary object and save as json
if __name__ == '__main__':
src = 'animegenres.txt'
animedict = ftodict(src)
animejson = json.dumps(animedict)
destname = src.strip('.txt') + '.json'
with open(destname, 'w') as dest:
dest.write(animejson)
``` |
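A tiny end-to-end sketch of the record format `ftodict()` expects: a term line, an explanation line, and an "Examples:" line, with records separated by a blank line. The file name and sample content are made up.
```python
from ftodconv import ftodict

sample = (
    "Shounen\n"
    "Anime aimed at a young male audience.\n"
    "Examples: Naruto and One Piece\n"
    "\n"
)
with open("sample_genres.txt", "w", encoding="utf8") as f:
    f.write(sample)

print(ftodict("sample_genres.txt"))
# {'Shounen': {'explanation': 'Anime aimed at a young male audience.',
#              'examples': ['Naruto', 'One Piece']}}
```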
{
"source": "joshakpeko/hangman_reloaded",
"score": 3
} |
#### File: joshakpeko/hangman_reloaded/entities.py
```python
import sys
from collections import namedtuple
import utilities
class Player:
"""A player is represented by its id, name, and stats."""
__session_idents = []
def __init__(self, pname):
player = utilities.get_player(pname)
if player is None:
self.ident = self.__new_ident()
self.name = pname
self.stats = Stats(self)
else:
self.ident = player.ident
self.name = player.name
self.stats = player.stats
def __repr__(self):
return self.name
def __new_ident(self):
"""Set an identification number for a new player."""
if len(self.__session_idents) == 0:
new_ident = utilities.get_max_ident() + 1
else:
new_ident = max(self.__session_idents) + 1
self.__session_idents.append(new_ident)
return new_ident
class Round:
"""A new game round"""
__lexicon = "french" # default lexicon
__max_attempts = 0xc # smile :)
__reward = 0b11 # smile again :)
def __init__(self, player):
if not isinstance(player, Player):
raise ValueError("invalid player: %s" % player)
self.suspended = True # signal to continue the round
self.player = player
self.lexicon = self.__lexicon
self.__word = "" # the word to be found (hidden)
self.__valid_chars = [] # to handle non-ascii chars
self.mask = "" # pattern revealing found letters
self.symbols = [] # list of non-alpha chars in word
self.__attempts = self.__max_attempts
self.__reward = self.__reward
self.played_chars = [set(), set()] # played chars and words
@property
def attempts(self):
return self.__attempts
@property
def stopped(self):
return self.suspended
def start(self):
"""Initialize a new word to start playing."""
if not self.suspended:
return
new_word = utilities.get_new_word(self.lexicon)
if new_word == "":
raise ValueError("no word found!")
self.__word = new_word
self.__valid_chars = utilities.decompose(new_word)
self.mask, symbols = utilities.set_mask(new_word)
self.symbols.extend(symbols)
self.suspended = False
def set_lexicon(self, new_lexicon):
"""Change session lexicon"""
self.lexicon = new_lexicon
def stop(self):
"""Cancel the ongoing round without updating user's stats."""
self.__word = ""
self.mask = ""
self.__attempts = self.__max_attempts
self.suspended = True
def play(self, chars):
"""Play function processes a single character or a
word played by the player, and update stats accordingly.
chars can be a single character or a full word."""
        if self.suspended:
return
# reconstitute chars without in-word symbols
alphas = "".join(c for c in chars if c not in self.symbols)
# update mask to insert guessed characters
if alphas.isalpha():
self.__update_mask(chars)
# add the user's proposal to played_chars list
if len(chars) == 1:
self.played_chars[0].add(chars)
else:
self.played_chars[1].add(chars)
self.__attempts -= 1
# if end of the round, update stats and end the round
if self.mask == self.__word: # success
self.player.stats.update(self.__reward)
utilities.save_to_db(self.player) # update database
self.stop()
if self.__attempts == 0: # failure
            self.player.stats.update(0b0)
utilities.save_to_db(self.player) # update database
self.stop()
def __update_mask(self, chars):
"""Update self.mask according to chars."""
n = len(chars)
new_mask = ""
if n == 1:
for i, pair in enumerate(self.__valid_chars):
if chars in pair:
new_mask += self.__word[i]
else:
new_mask += self.mask[i]
self.mask = new_mask
elif n > 1:
if chars == self.__word:
self.mask = self.__word
class Stats:
""" Represents a player statistics """
def __init__(self, player):
if not isinstance(player, Player):
raise ValueError("invalid player: %s" % player)
self.player_name = player.name
self.__ngames = 0 # number of played games
self.__successes = 0
self.__failures = 0
self.__points = 0
self.__level = 0 # from 5 defined level
def get_stats(self):
"""Return a nametuple of all stats"""
Stats = namedtuple("Stats", ["game_played", "successes",
"failures", "points", "level"])
return Stats(
self.__ngames, self.__successes,
self.__failures, self.__points, self.__level)
def update(self, reward):
"""Update player stats"""
self.__ngames += 1
self.__points += reward
if reward > 0:
self.__successes += 1
else:
self.__failures += 1
self.__level = utilities.get_level(
self.__ngames, self.__successes)
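# Minimal usage sketch (hypothetical names and values; assumes the external
# `utilities` module provides the lookups used above):
#
#     player = Player("alice")
#     game = Round(player)
#     game.start()                 # draw a hidden word from the lexicon
#     game.play("e")               # guess a single letter
#     game.play("python")          # or guess the whole word
#     print(game.mask, game.attempts, player.stats.get_stats())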
``` |
{
"source": "joshaldous/Football_Supporter_Recommender",
"score": 3
} |
#### File: Football_Supporter_Recommender/src/final_comparison.py
```python
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler as SScale
from sklearn.neighbors import DistanceMetric
from final_epl_eda import LeagueDFEDA
from scipy import stats
from scipy.spatial.distance import euclidean, jaccard, cosine
import time
import requests
import csv
import re
import pickle
def unpickler(file):
with open(file, 'rb') as f:
return pickle.load(f)
def pickler(input, output):
with open(output, 'wb') as f:
pickle.dump(input,f,pickle.HIGHEST_PROTOCOL)
class SimilarityDF(pd.DataFrame): # create a class for standardizing and vectorizing team stats
@property
def _constructor(self):
return SimilarityDF
def vectorizer(self,team_lst,df_type): # standardizes and vectorizes the input for all epl teams and a single nfl team
if df_type == 'EPL':
temp_df = self.set_index(['squad','season'])
elif df_type == 'NFL':
temp_df = self.set_index(['team','year'])
if len(team_lst) > 1:
stack_start = temp_df.loc[team_lst[0]]
stack_start_std = SScale().fit_transform(stack_start).ravel()
stack_start_std = stack_start_std.reshape(1,-1)
for team in team_lst[1:]:
team_df = temp_df.loc[team]
team_df_std = SScale().fit_transform(team_df).ravel()
team_df_std = team_df_std.reshape(1,-1)
stack_start_std = np.concatenate((stack_start_std,team_df_std),axis=0)
else:
stack_start = temp_df.loc[team_lst]
stack_start_std = SScale().fit_transform(stack_start).ravel()
stack_start_std = stack_start_std.reshape(1,-1)
return stack_start_std
class Distances(): # create a class to calculate the distances between vectors for recommendations
def __init__(self,team_vector,league_matrix,weights=None):
self.team_vector = team_vector
self.league_matrix = league_matrix
self.weights = weights
def euclidean_dist_calc(self,weights): # calculates the euclidean distance
weights = self.weights
mat_shape = self.league_matrix.shape
if not weights:
weights = np.ones((1,mat_shape[1]))
if self.league_matrix.shape[0] > 1:
euc_dist = euclidean(self.team_vector,np.matrix(self.league_matrix[0]),weights)
for u in np.matrix(self.league_matrix[1:]):
euc = euclidean(self.team_vector,u,weights)
euc_dist = np.hstack((euc_dist,euc))
else:
euc_dist = euclidean(self.team_vector,self.league_matrix,weights)
return euc_dist
def cosine_sim_calc(self): # calculates the cosine similarity (not used)
mat_shape = self.league_matrix.shape
if self.league_matrix.shape[0] > 1:
cos_start = np.dot(self.team_vector,np.matrix(self.league_matrix[0]).T)/(np.linalg.norm(self.team_vector) *
np.linalg.norm(np.matrix(self.league_matrix[0])))
cos_sim = 0.5 + 0.5 * cos_start
for u in np.matrix(self.league_matrix[1:]):
cos_cont = np.dot(self.team_vector,u.T)/(np.linalg.norm(self.team_vector) * np.linalg.norm(u))
cos_append = 0.5 + 0.5 * cos_cont
cos_sim = np.hstack((cos_sim,cos_append))
else:
costheta = np.dot(self.team_vector,self.league_matrix.T)/(np.linalg.norm(self.team_vector) *
np.linalg.norm(self.league_matrix.T))
cos_sim = 0.5 + 0.5 * costheta
return cos_sim
def cosine_dist_calc(self,weights): # calculates the cosine distance
weights = self.weights
mat_shape = self.league_matrix.shape
        if weights is None:
weights = np.ones((self.league_matrix.shape[1]))
if mat_shape[0] > 1:
cos_dist = cosine(self.team_vector,np.matrix(self.league_matrix[0]).T,weights)
for u in np.matrix(self.league_matrix[1:]):
cos_cont = cosine(self.team_vector,u.T,weights)
cos_dist = np.hstack((cos_dist,cos_cont))
else:
cos_dist = cosine(self.team_vector,self.league_matrix,weights)
return cos_dist
def jaccard_dist_calc(self,weights): # calculates the jaccard distance (not used)
weights = self.weights
mat_shape = self.league_matrix.shape
if not weights:
weights = np.ones((1,mat_shape[1]))
if mat_shape[0] > 1:
jac_dist = jaccard(self.team_vector,np.matrix(self.league_matrix[0]),weights)
for u in np.matrix(self.league_matrix[1:]):
jac_cont = jaccard(self.team_vector,u,weights)
jac_dist = np.hstack((jac_dist,jac_cont))
else:
jac_dist = jaccard(self.team_vector,self.league_matrix,weights)
return jac_dist
def top_dists(self,distance_calc,index_,col,number,weights=None): # creates an output for distance calculation with teams to compare
weights = self.weights
if distance_calc == 'euclidean':
dist = self.euclidean_dist_calc(weights).reshape(-1,1)
elif distance_calc == 'cosine_dist':
dist = self.cosine_dist_calc(weights)
elif distance_calc == 'cosine_sim':
dist = self.cosine_sim_calc().reshape(-1,1)
else:
dist = self.jaccard_dist_calc(weights).reshape(-1,1)
df = pd.DataFrame(dist,index=index_,columns=[col])
top = df.sort_values(by=col,ascending=True)
return top[:number]
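# Note on the weighted metrics used above (a sketch of scipy's behaviour, not
# project output): euclidean(u, v, w) returns sqrt(sum(w * (u - v)**2)) and
# cosine(u, v, w) returns 1 - sum(w*u*v) / sqrt(sum(w*u**2) * sum(w*v**2)),
# e.g. euclidean([1, 0], [0, 1], [2, 2]) -> 2.0, so smaller values from
# top_dists() indicate a closer statistical profile between two teams.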
if __name__ == '__main__':
dom_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df_clean_update.pickle') # unpickles the epl team data
nfl_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_df_clean_update.pickle') # unpickles the nfl team data
dom_df.drop(['country','mp','w','wins','losses','gdiff','dom_draw%','lg_finish', # drops columns from the epl data deemed to not be useful
'dom_comp_lvl','d','l','pts','attendance'],axis=1,inplace=True)
nfl_df.drop(['wins','losses','ties','off_rank_yards','def_rank_yards','turnover_ratio_rank','off_rank_pts', # drops columns from the nfl data that is deemed to not be useful
'def_rank_pts','pt_diff_rank','division_finish', 'points_for','yard_diff_rank','margin_of_vict',
'point_difference','playoff_finish','strgth_sched','srs','tie%','off_srs','def_srs'],axis=1,inplace=True)
nfl_cols = nfl_df.columns.tolist()
nfl_cols = ['team','tds','tds_allowed','year','playoffs','win%','loss%'] # reorders columns to match the epl data
nfl_df = nfl_df.reindex(columns = nfl_cols)
pickler(dom_df,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/EPL_to_vector_update.pickle') # saves epl data to be standardized
pickler(nfl_df,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_to_vector_update.pickle') # saves nfl data to be standardized
epl_vec = SimilarityDF(dom_df).vectorizer(dom_df.squad.unique(),'EPL') # creates an array of standardized epl team data
pickler(epl_vec,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_dists/epl_vectorized_update.pickle') # saves vectorized epl data
# dom_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/EPL_to_vector_update.pickle')
# nfl_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_to_vector_update.pickle')
epl_mat = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_dists/epl_vectorized_update.pickle')
weights = [0.08,0.05,0.093,0.08,0.03] # weights used in distance calculations
w = weights*3
## CARDINALS ##
print('CARDINALS')
team_vec = SimilarityDF(nfl_df).vectorizer(['Cardinals'],'NFL')
team = Distances(team_vec,epl_mat,w)
team_euc_top = team.top_dists(distance_calc='euclidean',index_=dom_df.squad.unique(),col='euc_dist',number=None)
team_cos_dist_top = team.top_dists(distance_calc='cosine_dist',index_=dom_df.squad.unique(),col='cos_dist',number=None)
print(team_euc_top[:5])
print(team_cos_dist_top[:5])
team_comp_start = [1 if x == y else 0 for x,y in zip(team_euc_top.index,team_cos_dist_top.index)]
## FALCONS ##
print('FALCONS')
team_vec = SimilarityDF(nfl_df).vectorizer(['Falcons'],'NFL')
team = Distances(team_vec,epl_mat,w)
team_euc_top = team.top_dists(distance_calc='euclidean',index_=dom_df.squad.unique(),col='euc_dist',number=None)
team_cos_dist_top = team.top_dists(distance_calc='cosine_dist',index_=dom_df.squad.unique(),col='cos_dist',number=None)
print(team_euc_top[:5])
print(team_cos_dist_top[:5])
team_comp_cont = [1 if x == y else 0 for x,y in zip(team_euc_top.index,team_cos_dist_top.index)]
team_comp_start = np.hstack((team_comp_start,team_comp_cont))
pickler(team_comp_start,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/nfl_dists/team_comp.pickle')
```
#### File: Football_Supporter_Recommender/src/final_scrape.py
```python
import numpy as np
import pandas as pd
import sqlite3
import datetime as dt
from bs4 import BeautifulSoup as BS
from os.path import basename
import time
import requests
import csv
import re
import pickle
def name_location_scrapper(url): # scrapes a list of teams and their urls
r = requests.get(url)
soup = BS(r.content,'html.parser')
tables = soup.find_all('table')
table_body = tables[0].find_all('tbody')
body_tds = table_body[0].find_all('td',attrs={'data-stat':'squad'})
team_link = []
team_name = []
for row in body_tds:
teams = row.find_all('a')
for team in teams:
team_link.append(team['href'])
team_name.append(team.text)
return team_link, team_name
def epl_link_cleaner(lst_of_team_urls,team_name): # reworks epl team website endings to complete urls
team_urls = [x.split('/') for x in lst_of_team_urls]
team_links = ['https://fbref.com/en/squads/'+x[3]+'/history/'+y+'-Stats-and-History'
for x,y in zip(team_urls,team_name)]
return team_links
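# e.g. (illustrative input, not scraped output) the link '/en/squads/18bb7c10/Arsenal-Stats'
# paired with the name 'Arsenal' becomes
# 'https://fbref.com/en/squads/18bb7c10/history/Arsenal-Stats-and-History'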
def pickler(input, output): # pickles variables needing saving
with open(output, 'wb') as f:
pickle.dump(input,f,pickle.HIGHEST_PROTOCOL)
def unpickler(file): # unpickles those variables
with open(file, 'rb') as f:
return pickle.load(f)
def team_domestic_league_df_creator(lst): # starts epl team statistics tables
url = lst[0]
r = requests.get(url)
soup = BS(r.content,'html.parser')
badge = soup.find_all('img',attrs={'class':'teamlogo'})
badge_pic = badge[0]['src']
with open(basename(badge_pic),'wb') as f:
f.write(requests.get(badge_pic).content)
tables = soup.find_all('table')
tabs = tables[:3]
df_dict = {}
for table in range(len(tabs)):
bodys = tabs[table].find_all('tbody')
heads = tabs[table].find_all('thead')
for head in heads:
hds = head.find_all('th')
cols = [hd.text for hd in hds[1:]]
rows = bodys[0].find_all('tr')
data = []
seasons = []
for row in rows:
row_tds = row.find_all('td')
yrs = row.find_all('th',attrs={'scope':'row'})
yr = [y.text for y in yrs]
r = [rtd.text for rtd in row_tds]
data.append(r)
seasons.append(yr)
df = pd.DataFrame(data,columns=cols)
df['year'] = seasons
df_dict[table] = df
pickler(df_dict,'df_dict.pickle')
return df_dict
def team_df_appender(lst,dom_df,icup_df,dcup_df): # appends epl team by team statistics to the table created above
for site in lst[1:]:
url = site
r = requests.get(url)
print(url)
soup = BS(r.content,'lxml')
badge = soup.find_all('img',attrs={'class':'teamlogo'})
badge_pic = badge[0]['src']
with open(basename(badge_pic),'wb') as f:
f.write(requests.get(badge_pic).content)
tables = soup.find_all('table')
df_dict = {}
caption_text = []
for tab in tables:
cap = tab.select('caption')
for c in cap:
caption_text.append(c.get_text(strip=True))
for tabs,caps in zip(range(len(tables)),caption_text):
df_dict[caps] = tables[tabs]
for table_name in df_dict.keys():
bodys = df_dict[table_name].find_all('tbody')
heads = df_dict[table_name].find_all('thead')
for head in heads:
hds = head.find_all('th')
cols = [hd.text for hd in hds[1:]]
rows = bodys[0].find_all('tr')
seasons = []
data = []
for row in rows:
row_tds = row.find_all('td')
yrs = row.find_all('th',attrs={'scope':'row'})
yr = [y.text for y in yrs]
r = [rtd.text for rtd in row_tds]
data.append(r)
seasons.append(yr)
df = pd.DataFrame(data,columns=cols)
df['year'] = seasons
if table_name == 'Domestic Leagues Results Table':
try:
dom_df = pd.concat([dom_df,df],axis=0,join='outer')
dom_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df_working.pickle' # saves progress in case of
# connection issues
pickler(dom_df,dom_file)
except:
print(f'{url} dom_league passed!! Try again')
elif table_name == 'International Cup Results Table':
try:
icup_df = pd.concat([icup_df,df],axis=0,join='outer')
icup_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_intnl_cup_df_working.pickle'
pickler(icup_df,icup_file)
except:
print(f'{url} icup passed!! Try again')
elif table_name == 'Domestic Cup Results Table':
try:
dcup_df = pd.concat([dcup_df,df],axis=0,join='outer')
dcup_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_cup_df_working.pickle'
pickler(dcup_df,dcup_file)
except:
print(f'{url} dcup passed!! Try again')
wait = np.random.randint(5,size=1)
time.sleep(wait)
return dom_df, icup_df, dcup_df
def nfl_team_df_creator(lst): # starts nfl team statistics table
url = lst[0]
r = requests.get(url)
soup = BS(r.content,'html.parser')
badge = soup.find_all('img',attrs={'class':'teamlogo'})
badge_pic = badge[0]['src']
with open(basename(badge_pic),'wb') as f:
f.write(requests.get(badge_pic).content)
tables = soup.find_all('table')
tbod = soup.find_all('tbody')
thead = soup.find_all('thead')
head_rows = thead[0].find_all('tr')
for hr in head_rows:
hds = hr.find_all('th')
cols = [hd.text for hd in hds[1:]]
trows = tbod[0].find_all('tr')
data = []
y_played = []
for tr in trows[:22]: # takes table rows 2020 - 2002
tds = tr.find_all('td')
yrs = tr.find_all('th',attrs={'scope':'row'})
yr = [y.text for y in yrs]
row = [td_.text for td_ in tds]
data.append(row)
y_played.append(yr)
df = pd.DataFrame(data,columns=cols)
df['year'] = y_played
return df
def nfl_df_appender(df_to_append,lst): # appends nfl team by team statistics to the nfl table created
for site in lst[1:]:
url = site
print(url)
r = requests.get(url)
soup = BS(r.content,'html.parser')
badge = soup.find_all('img',attrs={'class':'teamlogo'})
badge_pic = badge[0]['src']
with open(basename(badge_pic),'wb') as f:
f.write(requests.get(badge_pic).content)
tables = soup.find_all('table')
tbod = soup.find_all('tbody')
thead = soup.find_all('thead')
head_rows = thead[0].find_all('tr')
for hr in head_rows:
hds = hr.find_all('th')
cols = [hd.text for hd in hds[1:]]
trows = tbod[0].find_all('tr')
data = []
y_played = []
for tr in trows[:22]:
tds = tr.find_all('td')
yrs = tr.find_all('th',attrs={'scope':'row'})
yr = [y.text for y in yrs]
row = [td_.text for td_ in tds]
data.append(row)
y_played.append(yr)
df = pd.DataFrame(data,columns=cols)
df['year'] = y_played
df_to_append = df_to_append.append(df)
pickler(df_to_append,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/working_nfl_df.pickle')
wait = np.random.randint(5,size=1)
time.sleep(wait)
return df_to_append
if __name__ == '__main__':
team_link, team_name = name_location_scrapper('https://fbref.com/en/players/') # creates epl team url locations
team_links = epl_link_cleaner(team_link,team_name) # cleans url locations to connectable website pages
pickler(team_links,'team_links.pickle') # saves the names and urls of epl teams
team_urls = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data//pickles/epl_team_links.pickle')
team_urls = [x.replace(' ','-') for x in team_urls] # fixes an issue with teams with 2 names to replace the space between names with a dash
team_start_df = team_domestic_league_df_creator(team_urls) # starts epl team stats table
team_starter_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_df_dict.pickle')
domestic_df = team_starter_df[0] # creates a variable for one of the 3 tables scraped for each team
intnl_cup_df = team_starter_df[1] # creates a variable for one of the 3 tables scraped for each team
dom_cup_df = team_starter_df[2] # creates a variable for one of the 3 tables scraped for each team
dom_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df.pickle' # saves the epl team domestic league stats table
d_lg_df = unpickler(dom_file)
int_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_intnl_cup_df.pickle' # saves the epl team international cup stats table
i_cp_df = unpickler(int_file)
domcup_file = '/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_cup_df.pickle' # saves the epl team domestic cup stats table
d_cp_df = unpickler(domcup_file)
domestic_df, intnl_cup_df, dom_cup_df = team_df_appender(lst=team_urls,dom_df=domestic_df,icup_df=intnl_cup_df,dcup_df=dom_cup_df) # fills out the 3 started tables
dom_full = r'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df_full.pickle'
icup_full = r'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_intnl_cup_df_full.pickle'
dcup_full = r'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_cup_df_full.pickle'
pickler(domestic_df,dom_full)
pickler(intnl_cup_df,icup_full)
pickler(dom_cup_df,dcup_full)
nfl_team_urls = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_team_links.pickle') # starts nfl team stats table
nfl_start_df = nfl_team_df_creator(nfl_team_urls)
pickler(nfl_start_df,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_start_df.pickle')
nfl_start_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_start_df.pickle') # finish nfl team stats table
nfl_df = nfl_df_appender(nfl_start_df,nfl_team_urls)
pickler(nfl_start_df,'/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_df.pickle')
```
#### File: src/web_app/app.py
```python
import flask
import sys
sys.path.append('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/src/')
import final_comparison
import final_epl_eda
from final_comparison import pickler, unpickler, SimilarityDF, Distances
from final_epl_eda import LeagueDFEDA
import pickle
app = flask.Flask(__name__)
PORT = 8105
REGISTER_URL = "http://localhost:5000/index"
DATA = []
TIMESTAMP = []
img_dict = {'Arsenal':'epl/18bb7c10.png','Aston Villa':'epl/8602292d.png','Bournemouth':'epl/4ba7cbea.png',
'Brighton':'epl/d07537b9.png','Burnley':'epl/943e8050.png','Chelsea':'epl/cff3d9bb.png',
'Crystal Palace':'epl/47c64c55.png','Everton':'epl/d3fd31cc.png','Leicester City':'epl/a2d435b3.png',
'Liverpool':'epl/822bd0ba.png','Manchester City':'epl/b8fd03ef.png','Manchester Utd':'epl/19538871.png',
'Newcastle Utd':'epl/b2b47a98.png','Norwich City':'epl/1c781004.png','Sheffield Utd':'epl/1df6b87e.png',
'Southampton':'epl/33c895d4.png','Tottenham':'epl/361ca564.png','Watford':'epl/2abfe087.png',
'West Ham':'epl/7c21e445.png','Wolves':'epl/8cec06e1.png'}
url_dict = {'Arsenal':'https://www.arsenal.com','Aston Villa':'https://avfc.co.uk','Bournemouth':'https://afcb.co.uk',
'Brighton':'https://brightonandhovealbion.com','Burnley':'https://burnleyfootballclub.com','Chelsea':'https://chelseafc.com',
'Crystal Palace':'https://cpfc.co.uk','Everton':'https://evertonfc.com','Leicester City':'https://lcfc.com',
'Liverpool':'https://liverpoolfc.com','Manchester City':'https://mancity.com','Manchester Utd':'https://www.manutd.com',
'Newcastle Utd':'https://nufc.co.uk','Norwich City':'https://canaries.co.uk','Sheffield Utd':'https://sufc.co.uk',
'Southampton':'https://southamptonfc.com','Tottenham':'https://tottenhamhotspur.com','Watford':'https://watfordfc.com',
'West Ham':'https://whufc.com','Wolves':'https://wolves.co.uk'}
goal_dict = {'Arsenal':'https://www.youtube.com/watch?v=yO31QsfmnRE','Aston Villa':'https://www.youtube.com/watch?v=2YMJPhN3wnc',
'Bournemouth':'https://www.youtube.com/watch?v=c9VjOPnCDC0','Brighton':'https://www.youtube.com/watch?v=8-jvCuZ7Sc8',
'Burnley':'https://www.youtube.com/watch?v=mcfqxZIb6do','Chelsea':'https://www.youtube.com/watch?v=RXeT0RKb1X0',
'Crystal Palace':'https://www.youtube.com/watch?v=URWKcozh4T0','Everton':'https://www.youtube.com/watch?v=1mE-OvnWoaI',
'Leicester City':'https://www.youtube.com/watch?v=HMijNgNnqHc','Liverpool':'https://www.youtube.com/watch?v=BO0uy4CivMM',
'Manchester City':'https://www.youtube.com/watch?v=z61dIkxmPWw','Manchester Utd':'https://www.youtube.com/watch?v=-Q3tBEysdsQ',
'Newcastle Utd':'https://www.youtube.com/watch?v=kLhcPacf3Ww','Norwich City':'https://www.youtube.com/watch?v=ymsMqP0f2bk',
'Sheffield Utd':'https://www.youtube.com/watch?v=U9Tjk-AsLps','Southampton':'https://www.youtube.com/watch?v=24U454qJwLg',
'Tottenham':'https://www.youtube.com/watch?v=f8MxdAkf4xY','Watford':'https://www.youtube.com/watch?v=988yKC-odmM',
'West Ham':'https://www.youtube.com/watch?v=hxMg3wswzZU','Wolves':'https://www.youtube.com/watch?v=ICyVBpnSOfQ'}
app.secret_key = '<KEY>'
@app.route('/', methods = ['GET','POST'])
def index():
return flask.render_template('index.html')
# @app.route('/search', methods = ['GET','POST'])
# def search():
@app.route('/predict', methods = ['GET','POST'])
def predict():
epl_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_domestic_league_df_clean_update.pickle')
epl_mat = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/epl_dists/epl_vectorized_update.pickle')
nfl_df = unpickler('/home/josh/Documents/dsi/caps/cap3/Football_Supporter_Recommender/data/pickles/NFL_to_vector_update.pickle')
team = flask.request.form['team']
team_vec = SimilarityDF(nfl_df).vectorizer([team],'NFL')
selected = Distances(team_vec,epl_mat)
team_cos_top = selected.top_dists(distance_calc='cosine_dist',index_=epl_df.squad.unique(),col='cos_dist',number=3)
idx = [x for x in team_cos_top.index]
team1 = idx[0]
team2 = idx[1]
team3 = idx[2]
flask.session['team1'] = team1
flask.session['team2'] = team2
flask.session['team3'] = team3
team1_img = img_dict[team1]
team1_info = url_dict[team1]
team1_goals = goal_dict[team1]
team2_img = img_dict[team2]
team2_info = url_dict[team2]
team2_goals = goal_dict[team2]
team3_img = img_dict[team3]
team3_info = url_dict[team3]
team3_goals = goal_dict[team3]
return flask.render_template('index_album.html', team1=team1, team1_img=team1_img, team1_info=team1_info, team1_goals=team1_goals,
team2=team2, team2_img=team2_img, team2_info=team2_info, team2_goals=team2_goals,
team3=team3, team3_img=team3_img, team3_info=team3_info, team3_goals=team3_goals)
@app.route('/info', methods = ['GET','POST'])
def info():
team1 = flask.session.get('team1',None)
team2 = flask.session.get('team2',None)
team3 = flask.session.get('team3',None)
team_url = flask.request.form['team']
print(team_url)
team_, site = team_url.split('_')
print(team_)
print(site)
if site == 'info':
if team_ == 'team1':
url = url_dict[team1]
elif team_ == 'team2':
url = url_dict[team2]
elif team_ == 'team3':
url = url_dict[team3]
elif site == 'goals':
if team_ == 'team1':
url = goal_dict[team1]
elif team_ == 'team2':
url = goal_dict[team2]
elif team_ == 'team3':
url = goal_dict[team3]
return flask.redirect(url,code=302)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=PORT, debug=True)
``` |
{
"source": "joshaller/Sniff_To_Analytics",
"score": 2
} |
#### File: joshaller/Sniff_To_Analytics/monitor_packets.py
```python
import fileinput
import urllib2
import sys
import os
import time
import pickle
import shutil
import json
from firebase import firebase
from firebase import jsonutil
import threading
#
# Settings
#
reporting_interval = 5 # secs
minimum_record_interval = 5 * 60 # secs
db_name = 'state.p'
tmp_db_name = 'state.tmp'
graph_interp = 'spline'
smoothness = 0.5
#
# Globals
#
state = { 'mac_last_seen' : {}, 'samples' : [] }
lock = threading.Lock()
#
# Utility
#
def json_format(dict):
return json.dumps(dict, sort_keys=True, indent=4, separators=(',', ': '))
#
# Dictionary for rolling data graph.
#
def graph_json(state, reporting_period_start, reporting_period_end, reporting_samples):
#
## Generate samples needed in graph.
#
samples = []
# Widened match distance to average samples.
match_dist = smoothness * (reporting_period_end - reporting_period_start) / reporting_samples
#
# Find closest recorded sample for each reporting point.
#
for i in range(0,reporting_samples):
## parameterize 0..1 along time line.
u = float(i) / (reporting_samples-1)
interp_time = (1-u) * reporting_period_start + u * reporting_period_end
## search for best match to desired time.
best_dt = 1e+20
#best_value = None
value_sum = 0
value_count = 0
for trial in state['samples']:
dt = abs(trial['time'] - interp_time)
if dt < match_dist:
value_sum += trial['unique-visitors-last-hour']
value_count += 1
#if dt < match_dist and dt < best_dt:
#best_dt = dt
#best_value = trial['unique-visitors-last-hour']
## add closest or None to represent the sample.
#samples.append(best_value)
if value_count > 0:
samples.append(round(float(value_sum) / value_count, 1))
else:
samples.append(None)
## graph time is msecs since 1970
point_start = int(reporting_period_start * 1000)
point_interval = int(1000 * (reporting_period_end - reporting_period_start) / (reporting_samples-1))
graph = {
'chart' : {
'type': graph_interp
},
'title': {
'text': 'Peacock Lane Pedestrian Traffic'
},
'xAxis': {
'type': 'datetime',
'dateTimeLabelFormats': {
'day': '%b %e',
'hour': '%l%p'
},
'labels': {
'overflow': 'justify'
}
},
'yAxis': {
'title': {
'text': 'Visitors / Hour'
},
'min': 0,
'minorGridLineWidth': 1,
'gridLineWidth': 1,
'alternateGridColor': None,
},
'tooltip': {
'valueSuffix': ' visitors/hour'
},
'plotOptions': {
graph_interp: {
'animation' : False,
'lineWidth': 4,
'states': {
'hover': {
'lineWidth': 5
}
},
'marker': {
'enabled': False
},
'pointInterval': point_interval,
'pointStart': point_start
}
},
'series': [{
'name': 'Traffic',
'data': samples
}]
}
return json_format(graph)
#
# 72 hour highcharts graph (json)
#
def graph_json_72_hours():
now = time.time()
period = 3600 * 72
reporting_samples = 100 # one per hour
reporting_period_end = now
reporting_period_start = reporting_period_end - period
json = graph_json(state, reporting_period_start, reporting_period_end, reporting_samples)
return json
def graph_json_24_hours():
now = time.time()
period = 3600 * 24
reporting_samples = 100 # one per hour
reporting_period_end = now
reporting_period_start = reporting_period_end - period
json = graph_json(state, reporting_period_start, reporting_period_end, reporting_samples)
return json
#
#
#
def record_ga(mac_addr, tracking_id, type):
client_id = mac_addr
page = '%2F' + type + '%20' + mac_addr
url = 'http://www.google-analytics.com/collect?v=1&tid=%s&cid=%s&t=pageview&dp=%s' % (tracking_id, client_id, page)
response = urllib2.urlopen(url)
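# For reference (illustrative values): a sighting of MAC aa:bb:cc:00:11:22 with
# tracking id UA-12345-1 and type "probe" sends a Measurement Protocol hit like
#   .../collect?v=1&tid=UA-12345-1&cid=aa:bb:cc:00:11:22&t=pageview&dp=%2Fprobe%20aa:bb:cc:00:11:22
# i.e. each observed MAC is reported to Google Analytics as its own client id.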
#
#
#
def reportAnalytics():
global firebase_api
lock.acquire()
visitor_count_1_hour = 0
visitor_count_24_hour = 0
total_visitors = len(state['mac_last_seen'])
now = time.time()
for mac in state['mac_last_seen']:
age = now - state['mac_last_seen'][mac]
if age < 3600:
visitor_count_1_hour = visitor_count_1_hour + 1
if age < (24*3600):
visitor_count_24_hour = visitor_count_24_hour + 1
# Update Firebase.
if firebase_api:
#try:
if 1:
firebase_api.put('/', 'last_update', time.asctime())
firebase_api.put('/', 'unique_visitors_last_hour', visitor_count_1_hour)
firebase_api.put('/', 'unique_visitors_last_day', visitor_count_24_hour)
firebase_api.put('/', 'total_visitors', total_visitors)
firebase_api.put('/', 'graph_72_hours', graph_json_72_hours())
firebase_api.put('/', 'graph_24_hours', graph_json_24_hours())
#except:
# print 'Unable to push analytics to firebase.'
## Record sample to our local dictionary.
sample = {
'time' : time.time(),
'unique-visitors-last-hour' : visitor_count_1_hour,
'total-visitors' : total_visitors
}
    # guard against an empty sample list (and old pickles missing the key)
    if state.get('samples'):
if now - state['samples'][-1]['time'] > minimum_record_interval:
state['samples'].append(sample)
else:
state['samples'] = [ sample ]
## Save pickled copy of master list in case we crash.
pickle.dump(state, open(tmp_db_name, 'wb'))
os.rename(tmp_db_name, db_name)
## Report again after delay.
threading.Timer(reporting_interval, reportAnalytics, ()).start()
lock.release()
if __name__ == '__main__':
global firebase_api, type
firebase_id = sys.argv[1]
type = sys.argv[2]
print "firebase_id set to '%s'" % firebase_id
sys.stdout.flush()
# load previously saved db
try:
state = pickle.load(open(db_name, 'rb'))
except:
print 'problem with pickle file.'
pass
print 'starting with %d macs loaded' % len(state['mac_last_seen'])
sys.stdout.flush()
# Configure firebase.
firebase_api = firebase.FirebaseApplication('https://%s.firebaseio.com' % firebase_id, authentication=None)
if not firebase_api:
print 'Cannot initialize firebase.'
sys.stdout.flush()
# register start time.
try:
firebase_api.put('/', 'last_reboot', time.asctime())
print 'Reboot time set in firebase.'
except:
print 'Unable to set reboot time -- problem reaching firebase.'
# get GA tracking ID (if enabled).
try:
ga_tracking_id = firebase_api.get('/', 'GA-tracking-id')
print 'Google Analytics tracking started: %s' % ga_tracking_id
except:
ga_tracking_id = None
print 'No Google Analytics tracking ID found.'
sys.stdout.flush()
# Start background thread for reporting rolling stats.
threading.Timer(reporting_interval, reportAnalytics, ()).start()
    # Input loop, respond to every tshark line output.
while True:
try:
line=raw_input()
print line
sys.stdout.flush()
except:
print 'log_packet: exiting at EOF.'
os._exit(-1)
try:
part = line.split()
strength = float(part[0])
mac_addr = part[1]
except:
continue
# Record to GA.
if ga_tracking_id:
record_ga(mac_addr, ga_tracking_id, type)
# Update our list.
lock.acquire()
state['mac_last_seen'][mac_addr] = time.time()
lock.release()
``` |
{
"source": "joshamilton/Hamilton_acI_2016",
"score": 2
} |
#### File: code/orthoMCL/05parseCOGs.py
```python
from Bio import SeqIO
from collections import Counter
import pandas as pd
import re
#%%#############################################################################
### User-defined files and folder structure
################################################################################
genomeFolder = '../genomes/faa'
resultsFolder = '../results'
#%%#############################################################################
### Create a hash for mapping names to taxon IDs
### Create a dataFrame to store the results
################################################################################
taxonDict = {}
with open('taxonMapping.txt') as dictFile:
for line in dictFile:
(val, key) = line.split()
taxonDict[key] = val
inCogDict = {}
with open(resultsFolder+'/groups.txt') as dictFile:
for line in dictFile:
(key, val) = line.split(':')
inCogDict[key] = val
groupNum = len(inCogDict)
with open(resultsFolder+'/singletons.txt') as singletonFile:
for line in singletonFile:
key = 'group'+str(groupNum).zfill(5)
inCogDict[key] = line
groupNum = groupNum + 1
outCogFrame = pd.DataFrame(index=inCogDict.keys(), columns=taxonDict.values())
#%%#############################################################################
### Parse the inCogDict
### For each key, split the line on whitespace
### For each element, split along the '|'
### Use the prefix to look up the genome in taxonDict
### Write the results to the appropriate dataFrame element
################################################################################
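# e.g. (illustrative line, not from groups.txt) an entry such as
#   group00001: taxA|gene_001 taxB|gene_017
# places locus 'gene_001' in the taxonDict['taxA'] column of row 'group00001'
# and 'gene_017' in the taxonDict['taxB'] column.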
for key in inCogDict.keys():
cogList = inCogDict[key].split()
for cogLocus in cogList:
code = cogLocus.split('|')[0]
locus = cogLocus.split('|')[1]
# locus = locus.split('.')[2]+'.'+locus.split('.')[3]
# Check if field already exists
if pd.isnull(outCogFrame.loc[key, taxonDict[code]]):
outCogFrame.loc[key, taxonDict[code]] = locus
else:
outCogFrame.loc[key, taxonDict[code]] = outCogFrame.loc[key, taxonDict[code]]+';'+locus
outCogFrame.to_csv(resultsFolder+'/cogTable.csv')
#%%#############################################################################
### Assign annotations to COGs
### For each genome (column of outCogFrame), read the annotation information
### into a hash.
### For each cog in that genome, look up the annotation and assign it to
### cogAnnotFrame
################################################################################
cogAnnotFrame = outCogFrame.copy()
for genome in outCogFrame.columns:
# Create the annotation hash
annotHash = {}
inFile = open(genomeFolder+'/'+genome+'.faa', 'r')
for record in SeqIO.parse(inFile, 'fasta'):
locus = record.description.split()[1]
# locus = locus.split('.')[2]+'.'+locus.split('.')[3]
annotation = record.description.split()[2:]
annotation = ' '.join(annotation)
if not annotation:
annotation = 'None Provided'
annotHash[locus] = annotation
for index in cogAnnotFrame.index:
print('Processing genome: '+genome+' and locus: '+index)
if not pd.isnull(cogAnnotFrame.loc[index, genome]):
locusList = cogAnnotFrame.loc[index, genome].split(';')
annotList = []
for locus in locusList:
annotList.append(annotHash[locus])
cogAnnotFrame.loc[index, genome] = annotList
inFile.close()
cogAnnotFrame.to_csv(resultsFolder+'/annotTable.csv')
#%%#############################################################################
### Extract unique and consensus annotations
### Create a empty DF indexed by groups
### For each group, extract unique annotations and drop 'nan'
### Add to dataframe
################################################################################
def flatten(x):
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
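# e.g. flatten([['a', 'b'], 'c', [['d']]]) returns ['a', 'b', 'c', 'd'] (illustrative)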
annotSummary = pd.DataFrame(index=inCogDict.keys(), columns=['Annotations'])
for group in cogAnnotFrame.index:
annotMess = cogAnnotFrame.loc[group].tolist()
annotList = flatten(annotMess)
annotList = [annot for annot in annotList if str(annot) != 'nan']
annotList = list(set(annotList))
annotList = '; '.join(annotList)
annotSummary.loc[group] = annotList.strip(';')
annotSummary.to_csv(resultsFolder+'/annotSummary.csv')
# Create dataframe for consensus annotations
annotConsensus = pd.DataFrame(index=inCogDict.keys(), columns=['Annotation'])
for cog in annotConsensus.index:
annotList = []
for genome in cogAnnotFrame.columns:
if not pd.isnull(cogAnnotFrame.loc[cog][genome]):
innerString = cogAnnotFrame.loc[cog][genome]
# Dataframe element is a string enclosed in brackets with a comma separating elements
innerString = re.sub('[\[\]]' , '', innerString)
innerList = re.split('\', \'|\", \"', innerString)
innerList = [re.sub('\"|\'', '', string) for string in innerList]
annotList = annotList + innerList
# Find the most common
annotCounter = Counter(annotList)
majorityAnnot = annotCounter.most_common(1)[0][0]
majorityAnnotCount = annotCounter.most_common(1)[0][1]
    majorityAnnotCon = float(majorityAnnotCount) / len(annotList)
# Assign the Annotation
annotConsensus.set_value(cog, 'Annotation', majorityAnnot)
annotConsensus.set_value(cog, 'Confidence', majorityAnnotCon)
annotConsensus.to_csv(resultsFolder+'/annotConsensus.csv')
``` |
{
"source": "joshand/adaptive-policy-sync",
"score": 3
} |
#### File: joshand/adaptive-policy-sync/ise.py
```python
import json
import os
import re
from furl import furl
from datetime import datetime, timedelta
import requests
from urllib3.exceptions import HeaderParsingError
import time
base_dir = os.path.dirname(__file__)
class InvalidMacAddress(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ERS(object):
def __init__(self, ise_node, ers_user, ers_pass, verify=False, disable_warnings=False, use_csrf=False, timeout=2,
protocol='https'):
"""
Class to interact with Cisco ISE via the ERS API.
:param ise_node: IP Address of the primary admin ISE node
:param ers_user: ERS username
:param ers_pass: ERS password
:param verify: Verify SSL cert
:param disable_warnings: Disable requests warnings
:param timeout: Query timeout
"""
self.ise_node = ise_node
self.user_name = ers_user
        self.user_pass = ers_pass
self.protocol = protocol
self.url_base = '{0}://{1}:9060/ers'.format(self.protocol, self.ise_node)
self.ise = requests.sessions.Session()
self.ise.auth = (self.user_name, self.user_pass)
# http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification
self.ise.verify = verify
self.disable_warnings = disable_warnings
self.use_csrf = use_csrf
self.csrf = None
self.csrf_expires = None
self.timeout = timeout
self.ise.headers.update({'Connection': 'keep_alive'})
if self.disable_warnings:
requests.packages.urllib3.disable_warnings()
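    # Minimal usage sketch (illustrative host and credentials, not from this repo):
    #
    #     ise = ERS("10.0.0.1", "ersadmin", "password", verify=False, disable_warnings=True)
    #     print(ise.get_sgts()["response"])                  # [(name, id), ...]
    #     ise.add_sgt("Quarantine", "Infected hosts", 255)   # create a new SGT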
@staticmethod
def _mac_test(mac):
"""
Test for valid mac address.
:param mac: MAC address in the form of AA:BB:CC:00:11:22
:return: True/False
"""
if mac and re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:
return True
else:
return False
@staticmethod
def _sgt_name_test(name):
"""
Test for valid name.
:param name: Name; must not be null, must be <= 32 char, alphanumeric + _ only.
:return: True/False
"""
if name and re.search(r'^[a-zA-Z0-9_]*$', name) is not None and len(name) <= 32:
return True
else:
return False
@staticmethod
def _sgacl_name_test(name):
"""
Test for valid name.
:param name: Name; must start with letter; alphanumeric + _ only.
:return: True/False
"""
if name and re.search(r'^[a-zA-Z][a-zA-Z0-9_]*$', name) is not None:
return True
else:
return False
@staticmethod
def _oid_test(id):
"""
Test for a valid OID
:param id: OID in the form of abcd1234-ef56-7890-abcd1234ef56
:return: True/False
"""
if id and re.match(r'^([a-f0-9]{8}-([a-f0-9]{4}-){3}[a-z0-9]{12})$', id):
return True
else:
return False
@staticmethod
def _pass_ersresponse(result, resp):
try:
rj = resp.json()
if "SearchResult" in rj:
result['response'] = None
else:
result['response'] = rj['ERSResponse']['messages'][0]['title']
result['error'] = resp.status_code
return result
except ValueError:
if '<title>HTTP Status 401 – Unauthorized</title>' in resp.text:
result['response'] = 'Unauthorized'
result['error'] = resp.status_code
return result
else:
result['error'] = resp.status_code
return result
def _request(self, url, method="get", data=None):
if self.use_csrf:
            # refresh the CSRF token when it is missing or past its 60-second lifetime
            if not self.csrf_expires or not self.csrf or datetime.utcnow() > self.csrf_expires:
self.ise.headers.update({'ACCEPT': 'application/json', 'Content-Type': 'application/json',
'X-CSRF-TOKEN': 'fetch'})
resp = self.ise.get('{0}/config/deploymentinfo/versioninfo'.format(self.url_base))
self.csrf = resp.headers["X-CSRF-Token"]
                self.csrf_expires = datetime.utcnow() + timedelta(seconds=60)
self.ise.headers.update({'ACCEPT': 'application/json', 'Content-Type': 'application/json',
'X-CSRF-TOKEN': self.csrf})
try:
req = self.ise.request(method, url, data=data, timeout=self.timeout)
except HeaderParsingError:
print("Received HeaderParsingError; Trying again...")
time.sleep(5)
req = self.ise.request(method, url, data=data, timeout=self.timeout)
else:
try:
req = self.ise.request(method, url, data=data, timeout=self.timeout)
except HeaderParsingError:
print("Received HeaderParsingError; Trying again...")
time.sleep(5)
req = self.ise.request(method, url, data=data, timeout=self.timeout)
return req
def _get_groups(self, url, filter: str = None, size: int = 20, page: int = 1):
"""
Get generic group lists.
:param url: Base URL for requesting lists
:param size: size of the page to return. Default: 20
:param page: page to return. Default: 1
:return: result dictionary
"""
result = {
'success': False,
'response': '',
'error': '',
}
# https://github.com/gruns/furl
f = furl(url)
# TODO test for valid size 1<=x>=100
f.args['size'] = size
# TODO test for valid page number?
f.args['page'] = page
        # TODO add filter validation
if filter:
f.args['filter'] = filter
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
resp = self.ise.get(f.url)
if resp.status_code == 200:
result['success'] = True
result['response'] = [(i['name'], i['id'], i['description'])
for i in resp.json()['SearchResult']['resources']]
result['total'] = resp.json()['SearchResult']['total']
return result
else:
return ERS._pass_ersresponse(result, resp)
def _get_objects(self, url, filter: str = None, size: int = 20, page: int = 1):
"""
Generic method for requesting objects lists.
:param url: Base URL for requesting lists
:param filter: argument side of a ERS filter string. Default: None
:param size: size of the page to return. Default: 20
:param page: page to return. Default: 1
:return: result dictionary
"""
result = {
'success': False,
'response': '',
'error': '',
}
self.ise.headers.update(
{'Accept': 'application/json', 'Content-Type': 'application/json'})
f = furl(url)
# TODO test for valid size 1<=x>=100
f.args['size'] = size
# TODO test for valid page number?
f.args['page'] = page
        # TODO add filter validation
if filter:
f.args['filter'] = filter
resp = self.ise.get(f.url)
# TODO add dynamic paging?
if resp.status_code == 200:
json_res = resp.json()['SearchResult']
if int(json_res['total']) >= 1:
result['success'] = True
if json_res.get('nextPage'):
result['nextPage'] = json_res['nextPage']['href'].split('=')[-1]
if json_res.get('previousPage'):
result['prev'] = json_res['previousPage']['href'].split('=')[-1]
result['total'] = json_res['total']
result['response'] = [(i['name'], i['id'])
for i in json_res['resources']]
return result
elif int(json_res['total']) == 0:
result['success'] = True
result['response'] = []
result['total'] = json_res['total']
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_endpoint_groups(self, size=20, page=1):
"""
Get all endpoint identity groups.
:param size: Size of the number of identity groups before pagination starts
:return: result dictionary
"""
return self._get_groups('{0}/config/endpointgroup'.format(self.url_base), size=size, page=page)
def get_endpoint_group(self, group):
"""
Get endpoint identity group details.
:param group: Name of the identity group
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
# If it's a valid OID, perform a more direct GET-call
if self._oid_test(group):
result = self.get_object(
'{0}/config/endpointgroup'.format(self.url_base),
group,
'EndPointGroup'
)
return result
# If not valid OID, perform regular search
else:
resp = self.ise.get(
'{0}/config/endpointgroup?filter=name.EQ.{1}'.format(self.url_base, group))
found_group = resp.json()
if found_group['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/endpointgroup'.format(self.url_base), found_group['SearchResult']['resources'][0]['id'], "EndPointGroup") # noqa E501
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_endpoints(self, groupID=None, size=20, page=1):
"""
Get all endpoints.
:param groupID: List only endpoints in a specific GroupID. Default: None
:return: result dictionary
"""
if groupID:
filter = f"groupId.EQ.{groupID}"
else:
filter = None
return self._get_objects('{0}/config/endpoint'.format(self.url_base), filter=filter, size=size, page=page)
def get_sgts(self, sgtNum=None, detail=False, size=20, page=1):
"""
Get all Secure Group Tags.
:param sgtNum: retrieve sgt configuration for given SGT Number
:param detail: recursively retrieve all data for all SGTs in list (rather than just summary data)
:return: result dictionary
"""
if sgtNum:
filter = f"value.EQ.{sgtNum}"
else:
filter = None
if detail:
out_objs = []
objs = self._get_objects('{0}/config/sgt'.format(self.url_base), filter=filter, size=size, page=page)
for o in objs["response"]:
sgt = self.get_object('{0}/config/sgt'.format(self.url_base), o[1], 'Sgt')
out_objs.append(sgt["response"])
return {"success": objs["success"], "response": out_objs, "error": objs["error"]}
else:
return self._get_objects('{0}/config/sgt'.format(self.url_base), filter=filter, size=size, page=page)
def get_sgt(self, sgt):
"""
Get Secure Group Tag details.
:param sgt: name or Object ID of the Secure Group Tag
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
# If it's a valid OID, perform a more direct GET-call
if self._oid_test(sgt):
result = self.get_object(
'{0}/config/sgt'.format(self.url_base),
sgt,
'Sgt'
)
return result
# If not valid OID, perform regular search
else:
if isinstance(sgt, int):
resp = self.ise.get(
'{0}/config/sgt?filter=value.EQ.{1}'.format(self.url_base, sgt))
else:
resp = self.ise.get(
'{0}/config/sgt?filter=name.EQ.{1}'.format(self.url_base, sgt))
found_group = resp.json()
if found_group['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/sgt'.format(self.url_base),
found_group['SearchResult']['resources'][0]['id'], "Sgt")
return result
else:
return ERS._pass_ersresponse(result, resp)
def add_sgt(self,
name,
description,
value,
propogate_to_apic=False,
return_object=False):
"""
Add a SGT to TrustSec Components
:param name: Name
:param description: Description
:param value: SGT Number
:param propogate_to_apic: Specific to ACI
:param return_object: Look up object after creation and return in response
"""
is_valid = ERS._sgt_name_test(name)
if not is_valid:
result = {
'success': False,
'response': '',
                'error': '{0}. Invalid Security Group name: it may not be null, must be 32 characters or fewer, '
                         'and may only contain alphanumeric or underscore characters.'.format(name)
}
return result
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
data = {"Sgt": {'name': name, 'description': description, 'value': value,
'propogateToApic': propogate_to_apic}}
resp = self._request('{0}/config/sgt'.format(self.url_base), method='post', data=json.dumps(data))
if resp.status_code == 201:
result['success'] = True
if return_object:
result['response'] = self.get_sgt(name)["response"]
else:
result['response'] = '{0} Added Successfully'.format(name)
return result
else:
return ERS._pass_ersresponse(result, resp)
def update_sgt(self,
sgt,
name,
description,
value,
propogate_to_apic=False,
return_object=False):
"""
Update SGT in TrustSec Components
:param sgt: Object ID of sgt
:param name: Name
:param description: Description
:param value: SGT Number
:param propogate_to_apic: Specific to ACI
:param return_object: Look up object after update and return in response
"""
is_valid = ERS._sgt_name_test(name)
if not is_valid:
result = {
'success': False,
'response': '',
                'error': '{0}. Invalid Security Group name: it may not be null, must be 32 characters or fewer, '
                         'and may only contain alphanumeric or underscore characters.'.format(name)
}
return result
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
data = {"Sgt": {'name': name, 'description': description, 'value': value,
'propogateToApic': propogate_to_apic}}
resp = self._request(('{0}/config/sgt/' + sgt).format(self.url_base), method='put', data=json.dumps(data))
if resp.status_code == 200:
result['success'] = True
if return_object:
result['response'] = self.get_sgt(sgt)["response"]
else:
result['response'] = resp.json()
return result
else:
return ERS._pass_ersresponse(result, resp)
def delete_sgt(self, sgt):
"""
Delete SGT in TrustSec Components
:param sgt: Object ID of sgt
:return: Result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self._request('{0}/config/sgt/{1}'.format(self.url_base, sgt), method='delete')
if resp.status_code == 204:
result['success'] = True
result['response'] = '{0} Deleted Successfully'.format(sgt)
return result
elif resp.status_code == 404:
result['response'] = '{0} not found'.format(sgt)
result['error'] = resp.status_code
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_sgacls(self, detail=False, size=20, page=1):
"""
Get all Secure Group ACLs.
:param detail: recursively retrieve all data for all SGACLs in list (rather than just summary data)
:return: result dictionary
"""
filter = None
if detail:
out_objs = []
objs = self._get_objects('{0}/config/sgacl'.format(self.url_base), filter=filter, size=size, page=page)
for o in objs["response"]:
sgacl = self.get_object('{0}/config/sgacl'.format(self.url_base), o[1], 'Sgacl')
out_objs.append(sgacl["response"])
return {"success": objs["success"], "response": out_objs, "error": objs["error"]}
else:
return self._get_objects('{0}/config/sgacl'.format(self.url_base), filter=filter, size=size, page=page)
def get_sgacl(self, sgacl):
"""
Get Secure Group ACL details.
:param sgacl: name or Object ID of the Secure Group ACL
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
# If it's a valid OID, perform a more direct GET-call
if self._oid_test(sgacl):
result = self.get_object(
'{0}/config/sgacl'.format(self.url_base),
sgacl,
'Sgacl'
)
return result
# If not valid OID, perform regular search
else:
resp = self.ise.get(
'{0}/config/sgacl?filter=name.EQ.{1}'.format(self.url_base, sgacl))
found_group = resp.json()
if found_group['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/sgacl'.format(self.url_base),
found_group['SearchResult']['resources'][0]['id'], "Sgacl")
return result
else:
return ERS._pass_ersresponse(result, resp)
def add_sgacl(self,
name,
description,
ip_version,
acl_content,
return_object=False):
"""
Add a SG ACL to TrustSec Components
:param name: Name
:param description: Description
:param ip_version: IPV4, IPV6, or IP_AGNOSTIC
:param acl_content: List of ACLs
:param return_object: Look up object after creation and return in response
"""
is_valid = ERS._sgacl_name_test(name)
if not is_valid:
result = {
'success': False,
'response': '',
'error': '{0}. Invalid SGACL name, name should start with a letter and can only contain the '
'alphanumeric or underscore characters.'.format(name)
}
return result
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
data = {"Sgacl": {'name': name, 'description': description, 'ipVersion': ip_version,
'aclcontent': "\n".join(acl_content)}}
resp = self._request('{0}/config/sgacl'.format(self.url_base), method='post', data=json.dumps(data))
if resp.status_code == 201:
result['success'] = True
if return_object:
result['response'] = self.get_sgacl(name)["response"]
else:
result['response'] = '{0} Added Successfully'.format(name)
return result
else:
return ERS._pass_ersresponse(result, resp)
def update_sgacl(self,
sgacl,
name,
description,
ip_version,
acl_content,
return_object=False):
"""
Update a SG ACL from TrustSec Components
:param sgacl: Object ID of sgacl
:param name: Name
:param description: Description
:param ip_version: IPV4, IPV6, or IP_AGNOSTIC
:param acl_content: List of ACLs
:param return_object: Look up object after creation and return in response
"""
is_valid = ERS._sgacl_name_test(name)
if not is_valid:
result = {
'success': False,
'response': '',
'error': '{0}. Invalid SGACL name, name should start with a letter and can only contain the '
'alphanumeric or underscore characters.'.format(name)
}
return result
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
data = {"Sgacl": {'name': name, 'description': description, 'ipVersion': ip_version,
'aclcontent': "\n".join(acl_content)}}
resp = self._request(('{0}/config/sgacl/' + sgacl).format(self.url_base), method='put',
data=json.dumps(data))
if resp.status_code == 200:
result['success'] = True
if return_object:
result['response'] = self.get_sgacl(sgacl)["response"]
else:
result['response'] = resp.json()
return result
else:
return ERS._pass_ersresponse(result, resp)
def delete_sgacl(self, sgacl):
"""
Delete SGACL in TrustSec Components
:param sgacl: Object ID of sgacl
:return: Result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self._request('{0}/config/sgacl/{1}'.format(self.url_base, sgacl), method='delete')
if resp.status_code == 204:
result['success'] = True
result['response'] = '{0} Deleted Successfully'.format(sgacl)
return result
elif resp.status_code == 404:
result['response'] = '{0} not found'.format(sgacl)
result['error'] = resp.status_code
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_egressmatrixcells(self, detail=False, size=20, page=1):
"""
Get all TrustSec Egress Matrix Cells.
:param detail: recursively retrieve all data for all egress cells in list (rather than just summary data)
:return: result dictionary
"""
filter = None
if detail:
out_objs = []
objs = self._get_objects('{0}/config/egressmatrixcell'.format(self.url_base), filter=filter, size=size,
page=page)
for o in objs["response"]:
emc = self.get_object('{0}/config/egressmatrixcell'.format(self.url_base), o[1], 'EgressMatrixCell')
out_objs.append(emc["response"])
return {"success": objs["success"], "response": out_objs, "error": objs["error"]}
else:
return self._get_objects('{0}/config/egressmatrixcell'.format(self.url_base), filter=filter, size=size,
page=page)
def get_egressmatrixcell(self, emc, src_sgt=None, dst_sgt=None):
"""
Get TrustSec Egress Matrix Cell Policy details.
:param emc: name or Object ID of the TrustSec Egress Matrix Cell Policy
:param src_sgt: name or Object ID of the Source SGT in the Policy
:param src_sgt: name or Object ID of the Dest SGT in the Policy
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
# If it's a valid OID, perform a more direct GET-call
if self._oid_test(emc):
result = self.get_object(
'{0}/config/egressmatrixcell'.format(self.url_base),
emc,
'EgressMatrixCell'
)
return result
# If not valid OID, perform regular search
else:
if emc:
resp = self.ise.get(
'{0}/config/egressmatrixcell?filter=description.EQ.{1}'.format(
self.url_base, emc))
found_group = resp.json()
elif src_sgt and dst_sgt:
srcsgtval = self.get_sgt(src_sgt)["response"]["value"]
dstsgtval = self.get_sgt(dst_sgt)["response"]["value"]
resp = self.ise.get(
'{0}/config/egressmatrixcell?filter=sgtSrcValue.EQ.{1}&filter=sgtDstValue.EQ.{2}'.format(
self.url_base, srcsgtval, dstsgtval))
found_group = resp.json()
else:
return result
if found_group['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/egressmatrixcell'.format(self.url_base),
found_group['SearchResult']['resources'][0]['id'], "EgressMatrixCell")
return result
else:
return ERS._pass_ersresponse(result, resp)
def add_egressmatrixcell(self,
source_sgt,
destination_sgt,
default_rule,
acls=None,
description=None,
return_object=False):
"""
Add TrustSec Egress Matrix Cell Policy.
:param description: Description
:param source_sgt: Source SGT name or Object ID
:param destination_sgt: Destination SGT name or Object ID
:param default_rule: "NONE", "PERMIT_IP", "DENY_IP"
:param acls: list of SGACL Object IDs. Can be None.
:param return_object: Look up object after creation and return in response
"""
# ISE will actually allow you to post duplicate policies, so before we execute the post, double check to
# make sure a policy doesn't already exist
src_sgt_r = self.get_sgt(source_sgt)["response"]
dst_sgt_r = self.get_sgt(destination_sgt)["response"]
if src_sgt_r and dst_sgt_r:
src_sgt = src_sgt_r.get("id", None)
dst_sgt = dst_sgt_r.get("id", None)
celldata = self.get_egressmatrixcell(None, src_sgt=src_sgt, dst_sgt=dst_sgt)["response"]
else:
celldata = src_sgt = dst_sgt = None
if celldata:
result = {
'success': False,
'response': '',
'error': 'There is already a policy present for this source and destination. Please use update to make '
'policy changes.'
}
return result
elif default_rule == "NONE" and acls is None:
result = {
'success': False,
'response': '',
'error': 'You must specify one or more acls as a list, or a default_rule; both cannot be blank'
}
return result
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
newacls = []
if acls:
for a in acls:
if self._oid_test(a):
newacls.append(a)
else:
newacl = self.get_sgacl(a)["response"].get("id", None)
if newacl:
newacls.append(newacl)
data = {"EgressMatrixCell": {'description': description,
'sourceSgtId': src_sgt,
'destinationSgtId': dst_sgt,
'defaultRule': default_rule, "matrixCellStatus": "ENABLED",
'sgacls': newacls}}
resp = self._request('{0}/config/egressmatrixcell'.format(self.url_base), method='post',
data=json.dumps(data))
if resp.status_code == 201:
result['success'] = True
if return_object:
result['response'] = self.get_egressmatrixcell(None, src_sgt=src_sgt, dst_sgt=dst_sgt)["response"]
else:
result['response'] = '{0} Added Successfully'.format(description)
return result
else:
return ERS._pass_ersresponse(result, resp)
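# Hypothetical usage sketch (not part of the original source): given an existing ERS client
# `ise`, create a cell policy binding an assumed SGACL between two assumed SGTs. All names
# below are placeholders; the SGT/SGACL lookups are resolved by the method itself.
#
# cell = ise.add_egressmatrixcell(source_sgt='Employees',
#                                 destination_sgt='Servers',
#                                 default_rule='NONE',
#                                 acls=['Permit_Web'],
#                                 description='Employees to Servers',
#                                 return_object=True)
# if not cell['success']:
#     print(cell['error'])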
def update_egressmatrixcell(self,
emc,
source_sgt,
destination_sgt,
default_rule,
acls=None,
description=None,
return_object=False):
"""
Update TrustSec Egress Matrix Cell Policy.
:param emc: Object ID of egress matrix cell
:param description: Description
:param source_sgt: Source SGT name or Object ID
:param destination_sgt: Destination SGT name or Object ID
:param default_rule: "NONE", "PERMIT_IP", "DENY_IP"
:param acls: list of SGACL Object IDs. Can be None.
:param return_object: Look up object after creation and return in response
"""
if not emc:
result = {
'success': False,
'response': '',
'error': 'You must provide the egress matrix cell object id in order to update it.'
}
return result
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
newacls = []
if acls:
for a in acls:
if self._oid_test(a):
newacls.append(a)
else:
newacl = self.get_sgacl(a)["response"].get("id", None)
if newacl:
newacls.append(newacl)
src_sgt = self.get_sgt(source_sgt)["response"]
dst_sgt = self.get_sgt(destination_sgt)["response"]
if src_sgt and dst_sgt:
data = {"EgressMatrixCell": {'id': emc, 'description': description,
'sourceSgtId': src_sgt["id"],
'destinationSgtId': dst_sgt["id"],
'defaultRule': default_rule, "matrixCellStatus": "ENABLED",
'sgacls': newacls}}
resp = self._request(('{0}/config/egressmatrixcell/' + emc).format(self.url_base), method='put',
data=json.dumps(data))
if resp.status_code == 200:
result['success'] = True
if return_object:
result['response'] = self.get_egressmatrixcell(emc)["response"]
else:
result['response'] = resp.json()
return result
else:
return ERS._pass_ersresponse(result, resp)
else:
return {
'success': False,
'response': None,
'error': '',
}
def delete_egressmatrixcell(self, emc):
"""
Delete TrustSec Egress Matrix Cell Policy.
:param emc: Object ID of egress matrix cell policy
:return: Result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self._request('{0}/config/egressmatrixcell/{1}'.format(self.url_base, emc), method='delete')
if resp.status_code == 204:
result['success'] = True
result['response'] = '{0} Deleted Successfully'.format(emc)
return result
elif resp.status_code == 404:
result['response'] = '{0} not found'.format(emc)
result['error'] = resp.status_code
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_object(self, url: str, objectid: str, objecttype: str):
"""
Get generic object lists.
:param url: Base URL for requesting lists
:param objectid: ID retreved from previous search.
:param objecttype: "ERSEndPoint", etc...
:return: result dictionary
"""
result = {
'success': False,
'response': '',
'error': '',
}
self.ise.headers.update(
{'Accept': 'application/json', 'Content-Type': 'application/json'})
f = furl(url)
f.path /= objectid
resp = self.ise.get(f.url)
if resp.status_code == 200:
result['success'] = True
result['response'] = resp.json()[objecttype]
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_endpoint(self, mac_address):
"""
Get endpoint details.
:param mac_address: MAC address of the endpoint
:return: result dictionary
"""
is_valid = ERS._mac_test(mac_address)
if not is_valid:
raise InvalidMacAddress(
'{0}. Must be in the form of AA:BB:CC:00:11:22'.format(mac_address))
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/endpoint?filter=mac.EQ.{1}'.format(self.url_base, mac_address))
found_endpoint = resp.json()
if found_endpoint['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/endpoint/'.format(self.url_base), found_endpoint['SearchResult']['resources'][0]['id'], 'ERSEndPoint') # noqa E501
return result
elif found_endpoint['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(mac_address)
result['error'] = 404
return result
else:
result['response'] = '{0} not found'.format(mac_address)
result['error'] = resp.status_code
return result
def add_endpoint(self,
name,
mac,
group_id,
static_profile_assigment='false',
static_group_assignment='true',
profile_id='',
description='',
portalUser='',
customAttributes={}):
"""
Add a user to the local user store.
:param name: Name
:param mac: Macaddress
:param group_id: OID of group to add endpoint in
:param static_profile_assigment: Set static profile
:param static_group_assignment: Set static group
:param profile_id: OID of profile
:param description: User description
:param portalUser: Portal username
:param customAttributes: key value pairs of custom attributes
:return: result dictionary
"""
is_valid = ERS._mac_test(mac)
if not is_valid:
raise InvalidMacAddress(
'{0}. Must be in the form of AA:BB:CC:00:11:22'.format(mac))
else:
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
data = {"ERSEndPoint": {'name': name, 'description': description, 'mac': mac,
'profileId': profile_id, 'staticProfileAssignment': static_profile_assigment,
'groupId': group_id, 'staticGroupAssignment': static_group_assignment,
'portalUser': portalUser, 'customAttributes': {'customAttributes': customAttributes}
}
}
resp = self._request('{0}/config/endpoint'.format(self.url_base), method='post',
data=json.dumps(data))
if resp.status_code == 201:
result['success'] = True
result['response'] = '{0} Added Successfully'.format(name)
return result
else:
return ERS._pass_ersresponse(result, resp)
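# Hypothetical usage sketch (not part of the original source): registering an endpoint with
# an existing ERS client `ise`. The MAC must match the AA:BB:CC:00:11:22 format enforced
# above; the group OID below is a placeholder for an endpoint-group id obtained elsewhere.
#
# ep = ise.add_endpoint(name='AA:BB:CC:00:11:22',
#                       mac='AA:BB:CC:00:11:22',
#                       group_id='aa13bb40-8bff-11e6-996c-525400b48521',
#                       description='lab device')
# print(ep['success'], ep['response'])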
def delete_endpoint(self, mac):
"""
Delete an endpoint.
:param mac: Endpoint Macaddress
:return: Result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/endpoint?filter=mac.EQ.{1}'.format(self.url_base, mac))
found_endpoint = resp.json()
if found_endpoint['SearchResult']['total'] == 1:
endpoint_oid = found_endpoint['SearchResult']['resources'][0]['id']
resp = self._request(
'{0}/config/endpoint/{1}'.format(self.url_base, endpoint_oid), method='delete')
if resp.status_code == 204:
result['success'] = True
result['response'] = '{0} Deleted Successfully'.format(mac)
return result
elif resp.status_code == 404:
result['response'] = '{0} not found'.format(mac)
result['error'] = resp.status_code
return result
else:
return ERS._pass_ersresponse(result, resp)
elif found_endpoint['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(mac)
result['error'] = 404
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_identity_groups(self, filter=None, size=20, page=1):
"""
Get all identity groups.
:param filter: ISE style filter syntax. Default: None
:return: result dictionary
"""
return self._get_groups('{0}/config/identitygroup'.format(self.url_base), filter=filter, size=size, page=page)
def get_identity_group(self, group):
"""
Get identity group details.
:param group: Name of the identity group
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/identitygroup?filter=name.EQ.{1}'.format(self.url_base, group))
found_group = resp.json()
if found_group['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/identitygroup/'.format(
self.url_base), found_group['SearchResult']['resources'][0]['id'], 'IdentityGroup')
return result
elif found_group['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(group)
result['error'] = 404
return result
else:
result['response'] = '{0} not found'.format(group)
result['error'] = resp.status_code
return result
def get_users(self, size=20, page=1):
"""
Get all internal users.
:return: List of tuples of user details
"""
return self._get_objects('{0}/config/internaluser'.format(self.url_base), size=size, page=page)
def get_user(self, user_id):
"""
Get user detailed info.
:param user_id: User ID
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/internaluser?filter=name.EQ.{1}'.format(self.url_base, user_id))
found_user = resp.json()
if found_user['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/internaluser/'.format(
self.url_base), found_user['SearchResult']['resources'][0]['id'], 'InternalUser')
return result
elif found_user['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(user_id)
result['error'] = 404
return result
else:
result['response'] = 'Unknown error'
result['error'] = resp.status_code
return result
def add_user(self,
user_id,
password,
user_group_oid,
enable='',
first_name='',
last_name='',
email='',
description=''):
"""
Add a user to the local user store.
:param user_id: User ID
:param password: <PASSWORD>
:param user_group_oid: OID of group to add user to
:param enable: Enable password used for Tacacs
:param first_name: First name
:param last_name: Last name
:param email: email address
:param description: User description
:return: result dictionary
"""
result = {
'success': False,
'response': '',
'error': '',
}
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
data = {"InternalUser": {'name': user_id, 'password': password, 'enablePassword': enable,
'firstName': first_name, 'lastName': last_name, 'email': email,
'description': description, 'identityGroups': user_group_oid}}
resp = self._request('{0}/config/internaluser'.format(self.url_base), method='post',
data=json.dumps(data))
if resp.status_code == 201:
result['success'] = True
result['response'] = '{0} Added Successfully'.format(user_id)
return result
else:
return ERS._pass_ersresponse(result, resp)
def delete_user(self, user_id):
"""
Delete a user.
:param user_id: User ID
:return: Result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/internaluser?filter=name.EQ.{1}'.format(self.url_base, user_id))
found_user = resp.json()
if found_user['SearchResult']['total'] == 1:
user_oid = found_user['SearchResult']['resources'][0]['id']
resp = self._request(
'{0}/config/internaluser/{1}'.format(self.url_base, user_oid), method='delete')
if resp.status_code == 204:
result['success'] = True
result['response'] = '{0} Deleted Successfully'.format(user_id)
return result
elif resp.status_code == 404:
result['response'] = '{0} not found'.format(user_id)
result['error'] = resp.status_code
return result
else:
return ERS._pass_ersresponse(result, resp)
elif found_user['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(user_id)
result['error'] = 404
return result
else:
return ERS._pass_ersresponse(result, resp)
def get_device_groups(self, size=20, page=1):
"""
Get a list tuples of device groups.
:return:
"""
return self._get_groups('{0}/config/networkdevicegroup'.format(self.url_base), size=size, page=page)
def get_device_group(self, device_group_oid):
"""
Get a device group details.
:param device_group_oid: oid of the device group
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
return self.get_object('{0}/config/networkdevicegroup/'.format(self.url_base), device_group_oid, 'NetworkDeviceGroup') # noqa E501
def get_devices(self, filter=None, size=20, page=1):
"""
Get a list of devices.
:return: result dictionary
"""
return self._get_objects('{0}/config/networkdevice'.format(self.url_base), filter=filter, size=size, page=page)
def get_device(self, device):
"""
Get a device detailed info.
:param device: device_name
:return: result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/networkdevice?filter=name.EQ.{1}'.format(self.url_base, device))
found_device = resp.json()
if found_device['SearchResult']['total'] == 1:
result = self.get_object('{0}/config/networkdevice/'.format(self.url_base), found_device['SearchResult']['resources'][0]['id'], 'NetworkDevice') # noqa E501
return result
elif found_device['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(device)
result['error'] = 404
return result
else:
return ERS._pass_ersresponse(result, resp)
def add_device(self,
name,
ip_address,
radius_key,
snmp_ro,
dev_group,
dev_location,
dev_type,
description='',
snmp_v='TWO_C',
dev_profile='Cisco',
tacacs_shared_secret=None,
tacas_connect_mode_options='ON_LEGACY'
):
"""
Add a device.
:param name: name of device
:param ip_address: IP address of device
:param radius_key: Radius shared secret
:param snmp_ro: SNMP read only community string
:param dev_group: Device group name
:param dev_location: Device location
:param dev_type: Device type
:param description: Device description
:param dev_profile: Device profile
:param snmp_v: SNMP version (currently unused; the request always sends 'TWO_C')
:param tacacs_shared_secret: TACACS+ shared secret; TACACS settings are only sent when this is not None
:param tacas_connect_mode_options: TACACS+ connect mode options (default 'ON_LEGACY')
:return: Result dictionary
"""
result = {
'success': False,
'response': '',
'error': '',
}
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
data = {'NetworkDevice': {'name': name,
'description': description,
'authenticationSettings': {
'networkProtocol': 'RADIUS',
'radiusSharedSecret': radius_key,
'enableKeyWrap': 'false',
},
'snmpsettings': {
'version': 'TWO_C',
'roCommunity': snmp_ro,
'pollingInterval': 3600,
'linkTrapQuery': 'true',
'macTrapQuery': 'true',
'originatingPolicyServicesNode': 'Auto'
},
'profileName': dev_profile,
'coaPort': 1700,
'NetworkDeviceIPList': [{
'ipaddress': ip_address,
'mask': 32
}],
'NetworkDeviceGroupList': [
dev_group, dev_type, dev_location,
'IPSEC#Is IPSEC Device#No'
]
}
}
if tacacs_shared_secret is not None:
data['NetworkDevice']['tacacsSettings'] = {
'sharedSecret': tacacs_shared_secret,
'connectModeOptions': tacas_connect_mode_options
}
resp = self._request('{0}/config/networkdevice'.format(self.url_base), method='post',
data=json.dumps(data))
if resp.status_code == 201:
result['success'] = True
result['response'] = '{0} Added Successfully'.format(name)
return result
else:
return ERS._pass_ersresponse(result, resp)
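# Hypothetical usage sketch (not part of the original source): adding a network device with
# an existing ERS client `ise`. The group strings follow the 'Root#Subgroup' convention
# suggested by the hard-coded 'IPSEC#Is IPSEC Device#No' entry above; the exact group names
# and values below are assumptions.
#
# dev = ise.add_device(name='lab-switch-01',
#                      ip_address='192.0.2.20',
#                      radius_key='SharedRadiusKey',
#                      snmp_ro='public',
#                      dev_group='Device Group#Lab',
#                      dev_location='Location#All Locations#Lab',
#                      dev_type='Device Type#All Device Types#Switches',
#                      description='lab access switch')
# print(dev['success'], dev['response'])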
def delete_device(self, device):
"""
Delete a device.
:param device: device_name
:return: Result dictionary
"""
self.ise.headers.update(
{'ACCEPT': 'application/json', 'Content-Type': 'application/json'})
result = {
'success': False,
'response': '',
'error': '',
}
resp = self.ise.get(
'{0}/config/networkdevice?filter=name.EQ.{1}'.format(self.url_base, device))
found_device = resp.json()
if found_device['SearchResult']['total'] == 1:
device_oid = found_device['SearchResult']['resources'][0]['id']
resp = self._request(
'{0}/config/networkdevice/{1}'.format(self.url_base, device_oid), method='delete')
if resp.status_code == 204:
result['success'] = True
result['response'] = '{0} Deleted Successfully'.format(device)
return result
elif resp.status_code == 404:
result['response'] = '{0} not found'.format(device)
result['error'] = resp.status_code
return result
else:
return ERS._pass_ersresponse(result, resp)
elif found_device['SearchResult']['total'] == 0:
result['response'] = '{0} not found'.format(device)
result['error'] = 404
return result
else:
return ERS._pass_ersresponse(result, resp)
```
#### File: adaptive-policy-sync/scripts/clean_tasks.py
```python
from apscheduler.schedulers.background import BackgroundScheduler
from django_apscheduler.jobstores import DjangoJobStore
from django_apscheduler.jobstores import register_events
from django_apscheduler.models import DjangoJobExecution
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")
def cleanup():
DjangoJobExecution.objects.delete_old_job_executions(3600)
def run():
cleanup()
@scheduler.scheduled_job("interval", hours=8, id="clean_tasks")
def job():
cleanup()
register_events(scheduler)
scheduler.start()
```
#### File: adaptive-policy-sync/scripts/dashboard_monitor.py
```python
from apscheduler.schedulers.background import BackgroundScheduler
from django_apscheduler.jobstores import DjangoJobStore
from django_apscheduler.jobstores import register_events
from sync.models import SyncSession, Tag, ACL, Policy
from django.db.models import F, Q
from django.utils.timezone import make_aware
import sys
import datetime
import json
from scripts.db_trustsec import clean_sgts, clean_sgacls, clean_sgpolicies, merge_sgts, merge_sgacls, merge_sgpolicies
from scripts.dblog import append_log, db_log
import meraki
from scripts.meraki_addons import meraki_read_sgt, meraki_read_sgacl, meraki_read_sgpolicy, meraki_update_sgt, \
meraki_create_sgt, meraki_update_sgacl, meraki_create_sgacl, meraki_update_sgpolicy, meraki_delete_sgt, \
meraki_delete_sgacl
from django.conf import settings
import traceback
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")
def ingest_dashboard_data(accounts, log):
append_log(log, "dashboard_monitor::ingest_dashboard_data::Accounts -", accounts)
dt = make_aware(datetime.datetime.now())
for sa in accounts:
dashboard = None
a = sa.dashboard
append_log(log, "dashboard_monitor::ingest_dashboard_data::Resync -", a.description)
dashboard = meraki.DashboardAPI(base_url=a.baseurl, api_key=a.apikey, print_console=False, output_log=False,
caller=settings.CUSTOM_UA)
sgts = meraki_read_sgt(dashboard, a.orgid)
sgacls = meraki_read_sgacl(dashboard, a.orgid)
sgpolicies = meraki_read_sgpolicy(dashboard, a.orgid)
append_log(log, "dashboard_monitor::ingest_dashboard_data::SGTs - ", sgts)
append_log(log, "dashboard_monitor::ingest_dashboard_data::SGACLs - ", sgacls)
append_log(log, "dashboard_monitor::ingest_dashboard_data::Policies - ", sgpolicies)
merge_sgts("meraki", sgts, not sa.ise_source, sa, log)
merge_sgacls("meraki", sgacls, not sa.ise_source, sa, log)
merge_sgpolicies("meraki", sgpolicies, not sa.ise_source, sa, log)
clean_sgts("meraki", sgts, not sa.ise_source, sa, log)
clean_sgacls("meraki", sgacls, not sa.ise_source, sa, log)
clean_sgpolicies("meraki", sgpolicies, not sa.ise_source, sa, log)
a.raw_data = json.dumps({"groups": sgts, "acls": sgacls, "bindings": sgpolicies})
a.force_rebuild = False
a.last_sync = dt
a.last_update = dt
a.skip_sync = True
a.save()
def digest_database_data(sa, log):
append_log(log, "dashboard_monitor::digest_database_data::Account -", sa)
dashboard = meraki.DashboardAPI(base_url=sa.dashboard.baseurl, api_key=sa.dashboard.apikey, print_console=False,
output_log=False, caller=settings.CUSTOM_UA)
if not sa.apply_changes:
append_log(log, "dashboard_monitor::digest_database_data::sync session not set to apply changes;")
return
tags = Tag.objects.filter(Q(needs_update="meraki") & Q(do_sync=True) & Q(update_failed=False))
for o in tags:
if o.meraki_id:
if o.push_delete:
try:
ret = meraki_delete_sgt(dashboard, sa.dashboard.orgid, o.meraki_id)
append_log(log, "dashboard_monitor::digest_database_data::SGT delete", ret)
o.delete()
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::SGT Delete Exception", e,
traceback.format_exc())
o.update_failed = True
o.save()
else:
try:
ret = meraki_update_sgt(dashboard, sa.dashboard.orgid, o.meraki_id, name=o.name,
description=o.description, value=o.tag_number)
o.last_update_data = json.dumps(ret)
o.last_update_state = "True" if "groupId" in ret else "False"
o.save()
# Value update causes a delete/create combination, so immediately update with new ID
Tag.objects.filter(id=o.id).update(meraki_id=ret["groupId"])
merge_sgts("meraki", [ret], not sa.ise_source, sa, log)
append_log(log, "dashboard_monitor::digest_database_data::Push SGT update", o.meraki_id, o.name,
o.description, ret)
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::SGT Update Exception", e,
traceback.format_exc())
o.update_failed = True
o.save()
else:
try:
ret = meraki_create_sgt(dashboard, sa.dashboard.orgid, value=o.tag_number, name=o.name,
description=o.description)
o.last_update_data = json.dumps(ret)
o.last_update_state = "True" if "groupId" in ret else "False"
o.save()
merge_sgts("meraki", [ret], not sa.ise_source, sa, log)
append_log(log, "dashboard_monitor::digest_database_data::Push SGT create", o.tag_number, o.name,
o.description, ret)
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::SGT Create Exception", e,
traceback.format_exc())
o.update_failed = True
o.save()
acls = ACL.objects.filter(Q(needs_update="meraki") & Q(do_sync=True) & Q(update_failed=False))
for o in acls:
if o.meraki_id:
if o.push_delete:
try:
ret = meraki_delete_sgacl(dashboard, sa.dashboard.orgid, o.meraki_id)
append_log(log, "dashboard_monitor::digest_database_data::SGACL delete", ret)
o.delete()
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::SGACL Delete Exception", e,
traceback.format_exc())
o.update_failed = True
o.save()
else:
try:
ret = meraki_update_sgacl(dashboard, sa.dashboard.orgid, o.meraki_id, name=o.name,
description=o.description, rules=o.get_rules("meraki"),
ipVersion=o.get_version("meraki"))
o.last_update_data = json.dumps(ret)
o.last_update_state = "True" if "aclId" in ret else "False"
o.save()
merge_sgacls("meraki", [ret], not sa.ise_source, sa, log)
append_log(log, "dashboard_monitor::digest_database_data::Push SGACL update", o.meraki_id, o.name,
o.description, ret)
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::SGACL Update Exception", e,
traceback.format_exc())
o.update_failed = True
o.save()
else:
try:
ret = meraki_create_sgacl(dashboard, sa.dashboard.orgid, name=o.name,
description=o.description, rules=list(o.get_rules("meraki")),
ipVersion=o.get_version("meraki"))
o.last_update_data = json.dumps(ret)
o.last_update_state = "True" if "aclId" in ret else "False"
o.save()
merge_sgacls("meraki", [ret], not sa.ise_source, sa, log)
append_log(log, "dashboard_monitor::digest_database_data::Push SGACL create", o.name,
o.description, ret)
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::SGACL Create Exception", e,
traceback.format_exc())
o.update_failed = True
o.save()
policies = Policy.objects.filter(Q(needs_update="meraki") & Q(do_sync=True) & Q(update_failed=False))
for o in policies:
if o.push_delete:
try:
srcsgt, dstsgt = o.lookup_ise_sgts()
ret = meraki_update_sgpolicy(dashboard, sa.dashboard.orgid, name=o.name, description=o.description,
srcGroupId=srcsgt.meraki_id, dstGroupId=dstsgt.meraki_id, aclIds=None,
catchAllRule="global")
append_log(log, "dashboard_monitor::digest_database_data::Policy delete", ret)
o.delete()
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::Policy Delete Exception", e,
traceback.format_exc())
else:
try:
srcsgt, dstsgt = o.lookup_ise_sgts()
ret = meraki_update_sgpolicy(dashboard, sa.dashboard.orgid, name=o.name, description=o.description,
srcGroupId=srcsgt.meraki_id, dstGroupId=dstsgt.meraki_id,
aclIds=o.get_sgacls("meraki"), catchAllRule=o.get_catchall("meraki"),
bindingEnabled=True, monitorModeEnabled=False)
o.last_update_data = json.dumps(ret)
o.last_update_state = "True" if "srcGroupId" in ret else "False"
o.save()
merge_sgpolicies("meraki", [ret], not sa.ise_source, sa, log)
append_log(log, "dashboard_monitor::digest_database_data::Push Policy update", o.meraki_id, o.name,
o.description, ret)
except Exception as e: # pragma: no cover
append_log(log, "dashboard_monitor::digest_database_data::Policy Update Exception", e,
traceback.format_exc())
def sync_dashboard():
log = []
msg = ""
append_log(log, "dashboard_monitor::sync_dashboard::Checking Dashboard Accounts for re-sync...")
# Ensure that ISE has already completed a sync if it is the source of truth
stat = SyncSession.objects.filter(Q(ise_source=False) |
(Q(iseserver__last_sync__isnull=False) &
Q(dashboard__last_sync__isnull=True)) |
(Q(iseserver__last_sync__isnull=False) &
Q(iseserver__last_sync__gte=F('dashboard__last_sync'))))
if len(stat) <= 0:
append_log(log, "dashboard_monitor::sync_dashboard::Skipping sync as ISE is primary and needs to sync first.")
msg = "SYNC_DASHBOARD-ISE_NEEDS_SYNC"
else:
append_log(log, "dashboard_monitor::sync_dashboard::Running sync")
for s in stat:
ctime = make_aware(datetime.datetime.now()) - datetime.timedelta(seconds=s.sync_interval)
# Perform sync if one of the following conditions is met
# 1) The Sync Session is set to Force Rebuild (this one shouldn't be seen here, but just in case...)
# 2) The Dashboard Instance is set to Force Rebuild
# 3) The timestamp of the Dashboard database object isn't the same as the timestamp of its last sync
# 4) The timestamp of the Dashboard database object's last sync is beyond the configured manual sync timer
dbs = SyncSession.objects.filter(Q(dashboard__force_rebuild=True) |
Q(force_rebuild=True) |
~Q(dashboard__last_sync=F('dashboard__last_update')) |
Q(dashboard__last_sync__lte=ctime))
for d in dbs:
# Log the reason(s) for the current sync
if d.force_rebuild: # pragma: no cover
append_log(log, "dashboard_monitor::sync_dashboard::Sync Session Force Rebuild", d)
msg = "SYNC_DASHBOARD-SYNCSESSION_FORCE_REBUILD"
d.force_rebuild = False
d.save()
if d.dashboard.force_rebuild:
append_log(log, "dashboard_monitor::sync_dashboard::Dashboard Force Rebuild", d)
msg = "SYNC_DASHBOARD-DASHBOARD_FORCE_REBUILD"
d.dashboard.force_rebuild = False
d.dashboard.save()
if d.dashboard.last_sync != d.dashboard.last_update:
append_log(log, "dashboard_monitor::sync_dashboard::Database Config / Sync Timestamp Mismatch", d)
msg = "SYNC_DASHBOARD-CONFIG_SYNC_TIMESTAMP_MISMATCH"
if d.dashboard.last_sync and (d.dashboard.last_sync <= ctime):
append_log(log, "dashboard_monitor::sync_dashboard::Past Manual Sync Interval", d)
msg = "SYNC_DASHBOARD-PAST_SYNC_INTERVAL"
ingest_dashboard_data(dbs, log)
ss = SyncSession.objects.all()
if len(ss) > 0:
digest_database_data(ss[0], log)
append_log(log, "dashboard_monitor::sync_dashboard::Done")
db_log("dashboard_monitor", log)
return msg, log
def run(): # pragma: no cover
sync_dashboard()
@scheduler.scheduled_job("interval", seconds=10, id="dashboard_monitor")
def job(): # pragma: no cover
sync_dashboard()
if 'test' not in sys.argv and 'test' not in sys.argv[0]: # pragma: no cover
register_events(scheduler)
scheduler.start()
``` |
{
"source": "joshandali52/textis",
"score": 2
} |
#### File: textis/BackEnd/clusterWords.py
```python
import os
import numpy as np
import pickle as pl
import multiprocessing as mp
from matplotlib import pyplot as plt
import scipy.spatial.distance as ssd
from scipy.cluster.hierarchy import dendrogram, linkage
import scipy.stats
from scipy import spatial
def loadfile(fpath, rawName):
fname = fpath + rawName + ".pic"
with open(fname, "rb") as f: return pl.load(f)
def savefile(fpath, rawName, data):
fname = fpath + rawName + ".pic"
with open(fname, "wb") as f: return pl.dump(data,f)
def getJson(clusters, leaflabels, fSuffix, cooccVecs, wtoi, clem, fpath, debugname): #save if fSuffix<>""#https://gist.github.com/mdml/7537455
"""
Draws and stores a dendrogram of a given hierarchical clustering result.
:param clusters: Hierarchical clustering result encoded as a linkage matrix
:param leaflabels: Dictionary mapping index to word
:param fSuffix: String to append to file name. If this is empty then dendrogram is not stored
:param cooccVecs: List of (count, entropy) co-occurrence statistics per word
:param wtoi: Dictionary mapping word to index
:param clem: Lemma used for clustering
:param fpath: folder to store dendrogram
:param debugname: String to append to file name
:returns: Nested dictionary of the dendrogram for d3
"""
import scipy.spatial
import scipy.cluster
import json
from functools import reduce
T = scipy.cluster.hierarchy.to_tree(clusters, rd=False)
id2name = leaflabels
# Draw dendrogram using matplotlib to scipy-dendrogram.pdf
fSuffix = "" #clem+debugname+"_" if np.random.random()>0.98 else "" #debug
if len(fSuffix):
os.makedirs(fpath+"debug",exist_ok=True)
plt.close()
plt.figure(figsize=(30,15))
labels = [leaflabels[i] for i in range(len(leaflabels.keys()))] # Create dictionary for labeling nodes by their IDs
scipy.cluster.hierarchy.dendrogram(clusters, labels=labels, orientation='right')
plt.savefig(fpath+"debug/"+fSuffix+"_dendro.png")
def add_node(node, parent):
"""
Creates a nested dictionary from the ClusterNode's returned by SciPy.
:param node: Node to append
:param parent: Parent of node to append
"""
newNode = dict(node_id=node.id, children=[]) # First create the new node and append it to its parent's children
parent["children"].append(newNode)
if node.left: add_node(node.left, newNode) # Recursively add the current node's children
if node.right: add_node(node.right, newNode)
d3Dendro = dict(children=[], name=clem) # Initialize nested dictionary for d3, then recursively iterate through tree
add_node(T, d3Dendro)
def is2ndLeave(node): return node["children"]==0 or sum([len(k["children"])>0 for k in node["children"]])==0
def is3rdLeave(node): return sum([not is2ndLeave(k) for k in node["children"]]) == 0
def compress(node):
if is3rdLeave(node):
kids=sum([k["children"] for k in node["children"]],[])
kidsOfKids=sum([k["children"] if len(k["children"])>0 else [k] for k in kids],[])
for k in node["children"]:
if len(k["children"])==0: kidsOfKids.append(k)
node["children"]=kidsOfKids
else:
for k in node["children"]:
if len(k["children"]): compress(k)
compress(d3Dendro)
compress(d3Dendro)
compress(d3Dendro)
compress(d3Dendro)
def label_tree(n):
"""
Labels each node with the names of each leaf in its subtree.
:param n: Nested dictionary of the dendrogram
:returns: List of leaf names
"""
if len(n["children"]) == 0: leafNames = [id2name[n["node_id"]]] # If the node is a leaf, then we have its name
else: # If not, flatten all the leaves in the node's subtree
leafNames = reduce(lambda ls, c: ls + label_tree(c), n["children"], [])
#leafNames = reduce(lambda ls, c: ls + label_tree(c), n["children"], [])
del n["node_id"] # Delete the node id since we don't need it anymore and it makes for cleaner JSON
#n["name"] = "-".join(sorted(map(str, leafNames))) # Labeling convention: "-"-separated leaf names
if len(leafNames)>3:
#print("ERR",[l for l in leafNames if wtoi[l]>=len(coen)])
#chosen = [coen[wtoi[l]][1] for l in leafNames if wtoi[l]<len(coen)] # max entropy
#chosen2 = [coen[wtoi[l]][0] for l in leafNames if wtoi[l]<len(coen)] # max count
#names = set([leafNames[0], leafNames[-1], leafNames[len(leafNames) // 2],leafNames[np.argmin(chosen)],leafNames[np.argmin(chosen2)]])-set([leafNames[np.argmin(chosen)],leafNames[np.argmin(chosen2)]])
#names=[leafNames[np.argmin(chosen)], leafNames[np.argmin(chosen2)],list(names)[0]]
counts= np.array([cooccVecs[wtoi[l]][0] for l in leafNames if wtoi[l] < len(cooccVecs)]) # max count
meancounts=np.median(counts)
lnames = [len(l) for l in leafNames if wtoi[l] < len(cooccVecs)]
meanlen=np.mean(lnames)
names=[l for l in leafNames if wtoi[l] < len(cooccVecs) and cooccVecs[wtoi[l]][0] >= meancounts and len(l) < meanlen]
if len(names)<4: names=leafNames
names=list(set([names[0],names[len(names)//2],names[-1]]))
else: names=leafNames
n["name"] = ", ".join(sorted(map(str, names))) # Labeling convention: "-"-separated leaf names
#print(names, ">",[leafNames[0], leafNames[-1], leafNames[len(leafNames) // 2]],leafNames)
#if len(leafNames)>2:
#chosen3 = [adit[wos[wtoi[l]]] for l in leafNames]
#chosen4 = [wcounts[l] for l in leafNames]
#vecs=[getVec(mats, wtoi[l], upbound, offs, cWords) for l in leafNames]
#cen=np.sum(vecs)
#dists=np.array([spatial.distance.cosine(v, cen) for v in vecs])
#print(leafNames[np.argmin(chosen)]," | ",leafNames[np.argmax(chosen2)]," | ",leafNames[np.argmax(dists)]," | ",leafNames[np.argmax(chosen3)]," | ",leafNames[np.argmax(chosen4)]," -- ",leafNames)
return leafNames
label_tree(d3Dendro["children"][0])
if len(fSuffix):
jname=fSuffix+"_d3-dendro.json"
json.dump(d3Dendro, open(fpath+"debug/"+jname, "w"), sort_keys=True, indent=4) # Output to JSON
with open("dendrogram.html", "r") as f: dhtml=f.read()
dhtml=dhtml.replace("d3-dendrogram.json",jname)
with open(fpath+"debug/"+fSuffix+"_html_dendro.html", "w") as f: f.writelines(dhtml)
return d3Dendro
def multicluster(para):
clems, assDict, storeDebugFile, coitow, coen, wtoi,fpath,debugname = para
return [cluster((k, assDict, storeDebugFile, coitow, coen, wtoi,fpath,debugname)) for k in clems]
def cluster(para):
clem,assDict,storeDebugFile,coitow,coen,wtoi,fpath,debugname = para
if np.random.random()>0.95: print("Clustering word",clem)
# if not word in wToL:
# print("NOT found AS WORD!!!!", word)
# return None
# lem = wToL[word]
#get Distance matrix of associated terms
asso=list(assDict[wtoi[clem]]['All'])
wos = {x[0]:i for i,x in enumerate(asso)}
itoWID= {v:k for k,v in wos.items()}
diMat=np.zeros((len(wos),len(wos)))
for i,iw in enumerate(wos.keys()):
if iw in assDict:
distslw=dict(assDict[iw]['All'])
for j, jw in enumerate(wos.keys()):
if iw==jw: continue #should always be 0/max value (self association of word)
if jw in distslw:
diMat[wos[iw],wos[jw]]=distslw[jw]
diMat[wos[jw],wos[iw]] = diMat[wos[iw],wos[jw]]
diMat=-diMat
diMat += np.abs(np.min(diMat))
np.fill_diagonal(diMat,0)
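# Added note (not in the original source): the association scores are similarities (higher
# means more related), so they are negated and shifted to non-negative values to serve as
# distances for the ward linkage below.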
infinites=np.where(np.isinf(diMat))
if len(infinites[0]):
print("Distance matrix has infinite entries. Count:",len(infinites[0]),"orig word",clem," Replacing them with 1e5")
diMat[infinites]=1e5
distArray = ssd.squareform(diMat) # convert the redundant n*n square matrix form into a condensed nC2 array
Z = linkage(distArray, 'ward')#''ward')
leaflabels={i:coitow[itoWID[i]] for i in range(len(itoWID.keys()))}
return getJson(Z,leaflabels,storeDebugFile,coen,wtoi,clem,fpath,debugname)
# #Plot and save dendrogram
# labs = [leaflabels[i] for i in range(len(leaflabels.keys()))]
# plt.figure(figsize=(36,18))
# plt.title('Dendrogram (truncated) for '+ word)
# plt.xlabel('sample index')
# plt.ylabel('distance')
# dendrogram(
# Z,
# truncate_mode='lastp', # show only the last p merged clusters
# p=len(wos),#*5//6, # show only the last p merged clusters
# show_leaf_counts=False, # otherwise numbers in brackets are counts
# leaf_rotation=90.,
# leaf_font_size=9.,
# labels=labs,
# show_contracted=True, # to get a distribution impression in truncated branches
# )
# plt.tight_layout()
# plt.savefig(word+".png")
# #plt.show()
# plt.close()
def doCluster(Conf):
#print(wToL["IT experience"])
#print(wToL["it experience"])
# wcounts=loadfile(Conf.wcountsname)
# compwords=loadfile(Conf.compoundsname)
#wcounts = loadfile(Conf.wcountsname)
wToL = loadfile(Conf.fpath,Conf.wToLemmaname+Conf.fending)
coitow = loadfile(Conf.fpath,Conf.coiTowname+Conf.fending)
coen = loadfile(Conf.fpath,Conf.cooccVecs+Conf.fending)
wtoi = {k: i for i, k in coitow.items()}
lemmas = list(set(wToL.values()))
existlemmas = [k for k in lemmas if k in wtoi]
existlemmas = existlemmas if not Conf.isBackendDummy else existlemmas[:10]
print(existlemmas)
#existlemmas= [k for k in lemmas if k in ["python","machine learning","communication","data science","scala"]]
manager = mp.Manager()
d2 = manager.list()
for k in coen: d2.append(k)
pool = mp.Pool(processes=Conf.nProc // 5 + 1) # a lot of overhead due to manager and data sync, better not increase this...
def getCluster(loadfname,fend,savefname):
d = manager.dict()
assAbsDict = loadfile(Conf.fpath,loadfname+fend+Conf.fending)
for k in assAbsDict: d[k] = assAbsDict[k]
joblist = [(k, d, "", coitow, d2, wtoi,Conf.fpath,loadfname+Conf.fending+fend) for k in [existlemmas[i:i + 30] for i in range(0, len(existlemmas), 30)]]
# joblist = [(k, k, "", k, coen, k) for k in [existlemmas[i:i + 50] for i in range(0, len(existlemmas), 50)]]
print(loadfname,"nJobs", len(joblist)," nWords",len(assAbsDict), "nProc", Conf.nProc // 5 + 1)
clusters = pool.map(multicluster, joblist) # run in parallel
clusters = sum(clusters, [])
allCls = {k: cl for k, cl in zip(existlemmas, clusters)}
print("Saving",savefname)
# allCls={k:cluster(k,assDict) for k in wToL.values()}
savefile(Conf.fpath,savefname+fend+Conf.fending, allCls)
for fend in ["_win","_doc"]:
getCluster(Conf.assSurname,fend,Conf.asstreename)
getCluster(Conf.assAbsname,fend, Conf.assAbstreename)
if __name__ == '__main__':
import sys
sys.path.append("../")
sys.path.append("/../")
from Config import Cfg
Conf = Cfg(False)
doCluster(Conf)
```
#### File: textis/BackEnd/coocc.py
```python
from multiprocessing import Process, Manager
import queue
import numpy as np
import time,pickle
import scipy
from scipy.sparse import dok_matrix
from scipy.sparse import csr_matrix
import datetime
"""
Data structure for co-occurrences of words within documents or short text windows.
It consists of a dense matrix (for co-occurrences of frequent words) and sparse matrices for less frequent words.
Co-occurrence matrices are symmetric; we store only half the matrix. We use rectangular matrices of different types
Matrix looks like:
zzzzzzz
zzzzzzz
zzzzzzz
yyyyzzzzzzz
yyyyzzzzzzz
xxyyyyzzzzzzz
xxyyyyzzzzzzz
X is a dense matrix for the most frequent words, whose co-occurrence counts can reach millions (uint32)
Y is dense, but stores less frequent co-occurrences (uint16)
Z is sparse
Rough intuition: when is it better to store co-occurrences dense vs. sparse?
Assuming document-based co-occurrences:
If a word occurs in a document it forms roughly one pair per other word in that document, i.e. ~400 pairs for a document of 400 words.
One entry in the sparse matrix takes about 12 bytes of overhead plus the type-dependent byte size (byte_type).
For the dense matrix, i.e. a numpy array, an entry just takes byte_type bytes.
Hence, the break-even point for a word is roughly:
(#docs it occurs in) * 400 (document length) * (12 + byte_type) = number of words (nw) * byte_type
#docs = nw * byte_type / (400 * (12 + byte_type))
#docs ~ 100000 / (400 * 12)   (for nw = 100000 words and byte_type = 1, approximating 12 + 1 as 12)
#docs ~ 21, ie. a word occurring in more than ~21 documents is better stored densely (numpy array)
Dynamic sizing: if a word occurs in X documents, it can co-occur at most X times with any other word
To get ideal size of a type, sort by occurrences then use the following list: [(4294967295, np.uint32), (65535, np.uint16), (255, np.uint8)]
"""
minwperdoc = 15 # documents with less words are ignored
winwid = 7 # window size = #words left and right of center words
minOcc = 0 # minimum number a word has to occur in all docs together to be considered for co-occurrences computation
cooccname ="coocc"
cooccWwithFreqname = "cooccWwithFreq"
tsize = [(4294967295, np.uint32), (65535, np.uint16), (255, np.uint8), (22, "sparse")] # sparse can have at most 255 entries #get upper boundary of indexes of types
def getDataTypeRanges(wused):
"""
Get data type size ranges.
:param wused: List of tuples containing words and their occurences
:returns: List of tuples containing word index and data type, and list of offsetted indices
"""
cpos = 0
upbound_type = []
offs = [0]
for i, (_, nocc) in enumerate(wused):
if i%500 == 0:
arrsize = (i-offs[-1])*i*np.dtype(tsize[cpos][1]).itemsize
if arrsize > 900*1000*1000: #max 900MB per matrix
upbound_type.append((i, tsize[cpos][1]))
offs.append(i)
while (nocc <= tsize[cpos+1][0]):
upbound_type.append((i, tsize[cpos][1]))
offs.append(i)
cpos += 1
if cpos == len(tsize)-1: break
if cpos == len(tsize)-1: break
upbound_type.append((len(wused), tsize[cpos][1]))
return upbound_type, offs
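# Illustration (added comment, not original): with tsize above, words occurring more than
# 65535 times end up in leading uint32 blocks, counts in (255, 65535] in uint16 blocks,
# counts in (22, 255] in uint8 blocks, and words seen at most 22 times fall through to the
# trailing sparse matrix; dense blocks are additionally split so none exceeds ~900 MB.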
def save_sparse(filename, array): # note that .npz extension is added automatically
np.savez(filename, data=array.data, indices=array.indices,indptr=array.indptr, shape=array.shape)
def load_sparse(filename): # here we need to add .npz extension manually
loader = np.load(filename + '.npz')
return csr_matrix((loader['data'], loader['indices'], loader['indptr']), shape=loader['shape'])
def getMats(upbound, offs, nw):
"""
Creates empty co-occurrence matrices
:param upbound: List of tuples containing word index and data type
:param offs: List of offsetted indices
:param nw: Number of words
:returns: List of matrices
"""
mats = []
for i, (b, t) in enumerate(upbound):
if t!="sparse": cmat = np.zeros((b, b - offs[i]), dtype=t)
else: cmat = dok_matrix((nw, nw - offs[i]), dtype=np.uint8) # last matrix is sparse matrix
mats.append(cmat)
return mats
def jobCoocc(inqueue, outqueue, wtoi, procID, lock, upbound, offs, wtol, winocc, Config):
"""
Job that computes co-occurrences which can be run in parallel.
:param inqueue: Jobs to do by process
:param outqueue: Results of process
:param wtoi: Dictionary mapping word to index
:param procID: Process ID
:param lock: Shared lock to manage access to critical resources
:param upbound: List of tuples containing word index and data type
:param offs: List of offsetted indices
:param wtol: Dictionary mapping word to lemma
:param winocc: Boolean for window-based co-occurrences, ie. True for window-based, False for document-based
:param Config: Configuration object
"""
if procID%3 == 1: print("Start ", procID)
mats = getMats(upbound, offs, len(wtoi))
rdocs = 0
while inqueue:
try:
(fname, content) = inqueue.get_nowait()
except queue.Empty:
time.sleep(0.51)
continue
if fname == "Stop":
inqueue.put((fname, content))
break
nrdocs = getWindowOrDocCoocc(content, mats, upbound, offs, wtoi, wtol, procID, rdocs, winocc, Config.maxWordsCoocc)
rdocs += nrdocs
print("Done Merge Own", procID, " Read Docs:", rdocs) #aggregate if possible
pchunk = 1 #chunks of a process
nmerges = 0
if upbound[-1][1] == "sparse": mats[-1] = mats[-1].tocsr() #only if count words
while True:
try:
cmats = []
lock.acquire()
(fname, npchunk) = outqueue.get_nowait()
for i in range(len(mats)):
m = outqueue.get_nowait()
if upbound[i][1] == "sparse": m = m.tocsr()
cmats.append(m)
lock.release()
for i, m in enumerate(cmats):
mats[i] += cmats[i]
pchunk += npchunk
nmerges += 1
except queue.Empty:
lock.release()
break
lock.acquire()
outqueue.put((fname, pchunk))
for m in mats: outqueue.put(m)
lock.release()
print("Done Merge Other", procID, nmerges)
def getWindowOrDocCoocc(docs, mats, lbound, offs, wtoi, wtol, procID, rdocs, winocc, maxWordsCoocc):
"""
Computes co-occurrences within documents and updates the co-occurrence matrices.
:param docs: List of documents
:param mats: List of placeholder matrices for co-occurrences
:param lbound: List of indices, eg. [10,100,1000], where 10 means up to element 10 use mat0, up to ind 100 mat1, ... if lastind use lastmat
:param offs: List of offsetted indices
:param wtoi: Dictionary mapping word to index
:param wtol: Dictionary mapping word to lemma
:param procID: Process ID
:param rdocs: Total count of processed documents
:param winocc: Boolean for window-based co-occurrences, ie. True for window-based, False for document-based
:param maxWordsCoocc: Maximum number of words to compute
:returns: Count of processed documents
"""
ndocs=0
for id,d in enumerate(docs):
words = d#.split(" ")
if len(words) < minwperdoc:
continue
if (rdocs+ndocs+1) % 1000 == 0:
print("Docs processed", rdocs+ndocs, procID)
ndocs+=1
if winocc:
#Get window based coocc in one doc
ocwo = [wtol[w] for w in words if w in wtol and wtol[w] in wtoi]
for iw,w in enumerate(ocwo):
indw =wtoi[w]
for w2 in ocwo[max(0,iw-winwid):min(len(ocwo),iw+winwid+1)]:
if w!=w2:
(minind, maxind) = (indw, wtoi[w2]) if indw < wtoi[w2] else (wtoi[w2], indw)
cb = 0
while maxind >= lbound[cb][0]: cb += 1
# if minind<offs[cb]: print(minind,maxind,cb,offs,lbound
mats[cb][minind, maxind - offs[cb]] += 1
else:
#Get document based coocc
uwords = list(set(words)) #[:rconfigLar.maxWordsdPerDoc]
if len(uwords) < maxWordsCoocc:
ocwo = [w for w in uwords if w in wtoi]
for iw, w in enumerate(ocwo): # use set to get only unique words
indw = wtoi[w]
for w2 in ocwo[iw+1:]:
#var lbound looks like [10,100,1000], where 10 means up to ele 10 use mat0, up to ind 100 mat1, ... if lastind use lastmat
(minind, maxind) = (indw, wtoi[w2]) if indw < wtoi[w2] else (wtoi[w2], indw)
cb=0
while maxind >= lbound[cb][0]: cb += 1
#if minind<offs[cb]: print(minind,maxind,cb,offs,lbound
mats[cb][minind, maxind-offs[cb]] += 1
print("skipped", len(docs)-ndocs, " of ", len(docs))
return ndocs
def countReduce(inqueue, upbound):
"""
Sums the partial co-occurrence matrices produced by all worker processes.
:param inqueue: Queue holding the per-process result matrices
:param upbound: List of tuples containing word index and data type
:returns: List of aggregated co-occurrence matrices
"""
lm = len(upbound) #need only length
mats = []
(_, pchunk) = inqueue.get_nowait()
for i in range(lm):
mats.append(inqueue.get_nowait())
while True:
try:
(_, cpchunk) = inqueue.get_nowait()
for i in range(len(mats)):
mats[i] += inqueue.get_nowait()
pchunk += cpchunk
print("Reduced #", cpchunk)
except queue.Empty:
break
return mats
def scheduleCooccJobs(Config, docs, wtoi, upbound, offs, wtol, wincoocc):
"""
Parallelizes computations for co-occurrence matrices and stores in files.
:param Config: Configuration object
:param docs: List of documents
:param wtoi: Dictionary mapping word to index
:param upbound: List of tuples containing word index and data type
:param offs: List of offsetted indices
:param wtol: Dictionary mapping word to lemma
:param wincoocc: Boolean for window-based co-occurrences, ie. True for window-based, False for document-based
"""
m = Manager()
q = m.Queue()
q1 = m.Queue()
lock = m.Lock()
q.put(("data",docs))
q.put(("Stop",None))
workers = [Process(target=jobCoocc, args=(q, q1, wtoi, i + 1, lock, upbound, offs, wtol, wincoocc, Config,)) for i in range(Config.nProc - 1)]
print("Starting ")
for w in workers: w.start()
jobCoocc(q, q1, wtoi, 99, lock, upbound, offs, wtol, wincoocc, Config)
print("Joining...")
for w in workers: w.join()
print("FinalRed...")
mats = countReduce(q1, upbound)
print("Done All cooc, need to store...")
fend = "_win" if wincoocc else "_doc"
for i, m in enumerate(mats): #
if upbound[i][1] == "sparse":
save_sparse(Config.fpath + cooccname+fend+Config.fending, mats[-1].tocsr())
else: np.save(Config.fpath + cooccname + "de"+str(i)+fend+Config.fending, mats[i])
#for m in mats: print(" su",np.sum(m))
print("Storing complete")
def GetUsedWords(Config, wcounts):
"""
Stores most frequent words in a file; these are used in analysis, eg. for co-occ matrix
:param Config: Configuration object
:param wcounts: Dictionary mapping word to occurrence count
"""
#allws=[w for d in docs for w in d]
#from collections import Counter
#wcounts=Counter(allws)
import operator
nwcounts = {k: v for k, v in wcounts.items() if v >= minOcc}
print("NWords ", len(wcounts), "NWords > ", minOcc, ": ", len(nwcounts)) # , " in found words: ", len(nwcounts)
sorted_all = sorted(nwcounts.items(), key=operator.itemgetter(1), reverse=True)
sorted_x = sorted_all[:Config.nWords]
fiveper = int((len(sorted_x) - 1) * 1.0 / 20 - 1)
print([(sorted_x[i * fiveper], i * fiveper) for i in range(min(len(sorted_x), 20))])
with open(Config.fpath + cooccWwithFreqname, "wb") as f: pickle.dump(sorted_x, f)
def loadCooc(Config, wincoocc):
"""
Loads co-occurrence matrices from a given context (ie. window- or document-based).
:param Config: Configuration object
:param wincoocc: Boolean for window-based co-occurrences, ie. True for window-based, False for document-based
:returns: List of co-occurrence matrices
"""
print("loading...",datetime.datetime.now())
with open(Config.fpath + cooccWwithFreqname, "rb") as f: usedWords=pickle.load(f)
print("loading...", datetime.datetime.now())
upbounds,offs = getDataTypeRanges(usedWords)
mats=[]
fend = "_win" if wincoocc else "_doc"
for i,b in enumerate(upbounds):
if b[1] == "sparse": m= load_sparse(Config.fpath + cooccname +fend+Config.fending)
else: m = np.load( Config.fpath +cooccname+ "de"+str(i)+fend+Config.fending+".npy")
print("Load mat",m.shape,type(m))
# nallp+=np.sum(m)
mats.append(m)
#print "loading...", datetime.datetime.now()
#coocc = scipy.io.mmread(fpath + wikiconfig + "wcoocc.sci.mtx")
#save_sparse_csr(fpath + wikiconfig + "wcoocc.sci", coocc.tocsr())
print("Done loading...",datetime.datetime.now())
return mats
def getVec(mats, iw, upbounds, offs, cWords):
"""
Gets a word's co-occurrences.
:param mats: List of co-occurrence matrices
:param iw: Index of the target word
:param upbounds: List of tuples containing word index and data type
:param offs: List of offsetted indices
:param cWords: Number of words to analyze
:returns: Vector of the target word's co-occurrence counts
"""
vec = np.zeros(cWords)
maxind = iw
# get all row entries
#print("nok")
for cb in range(len(mats)):
if mats[cb].shape[0]==0 or mats[cb].shape[0]<=iw: continue
nvpart = mats[cb][iw, :]
nvpart = np.asarray(nvpart.todense()).squeeze() if scipy.sparse.issparse(nvpart) else nvpart
#print(type(nvpart),nvpart.shape)
#nvpart = np.asarray(nvpart).squeeze() # reshape(max(nvpart.shape),1)
vec[:nvpart.shape[0]] += nvpart
cb = 0
#print("ok")
#
# while cb<len(mats)-1 and (not (maxind < offs[cb+1] and maxind>=offs[cb])) or (mats[cb].shape[0]==0): cb += 1 #
# print(mats[cb].shape,iw,offs[cb],cb)
# nvpart = mats[cb][iw,:]
# nvpart = nvpart.todense() if scipy.sparse.issparse(nvpart) else nvpart
# nvpart = np.asarray(nvpart).squeeze()#reshape(max(nvpart.shape),1)
# #print(len(nvpart), nvpart.shape,type(nvpart))
# #nv=nvpart.flatten()
# #print(len(nv),nv.shape)
# vec[:len(nvpart)]=nvpart
# cb+=1
# if cb==len(mats):return vec
#print(iw, iw - offs[cb], upbounds[cb][0], "bef", vec)
# get column for entry iw, ie. all words which are less frequent and have iw'<iw
while maxind >= upbounds[cb][0]: cb += 1
#print(" sh",mats[cb][:, iw - offs[cb]].shape)
nvpart = mats[cb][:, iw - offs[cb]]
nvpart = np.asarray(nvpart.todense()).squeeze() if scipy.sparse.issparse(nvpart) else nvpart
#nvpart = np.asarray(nvpart).squeeze()#nvpart.reshape(max(nvpart.shape), 1)
vec[:upbounds[cb][0]]+=nvpart#.flatten()
# print("aft", vec)
# print("nv",nvpart)
# while cb<len(mats): #upbounds[cb][0]
# nvpart=mats[cb][iw,:]
# nvpart = nvpart.todense() if scipy.sparse.issparse(nvpart) else nvpart
# #nvpart=nvpart.reshape(max(nvpart.shape),1)
# nvpart = np.asarray(nvpart).squeeze()
# vec[offs[cb]:upbounds[cb][0]]+=nvpart#.flatten() #[iw, :upbounds[cb][0] - offs[cb]]
# cb+=1
#
# print("aft2",vec)
return vec
def getCoocc(Config, docs, wCounts, wtol):
"""
Computes co-occurrence matrices of words and stores them in files for both contexts (ie. window- or document-based).
Context dictates the definition of 'co-occurrence', ie. either within the same window or the same document.
:param Config: Configuration object
:param docs: List of documents
:param wCounts: Dictionary mapping word to occurrence count
:param wtol: Dictionary mapping word to lemma
"""
#get counts and words
GetUsedWords(Config, wCounts)
with open(Config.fpath + cooccWwithFreqname, "rb") as f: sorted_words=pickle.load(f)
countDict ={k:v for k, v in sorted_words}
wtoi = {w[0]:i for i, w in enumerate(sorted_words)}
#get co-occurrences
#fdocs = [[w for w in d if w in wtoi] for d in docs ]
upbound, offs = getDataTypeRanges(sorted_words)
for wincoocc in [True,False]:
scheduleCooccJobs(Config, docs, wtoi, upbound, offs, wtol, wincoocc)
#get frequent words for each word as matrix
itow = {i:w for w, i in wtoi.items()}
# get weight for each word, eg. smoothed inverse frequency, motivated by pmi, pointwise mutual information
cWords = min(Config.nWords,len(itow))
mats = loadCooc(Config, wincoocc)
assDict = {}
assDictAbs = {}
nwin = sum([len(d)-2*winwid for d in docs]) #there are less full wins, since beginning and end only partial; they are still windows, but not as big
nScale = 1.0 / nwin # a pair occurs in a window with prob #winsize/nwin,
for i in range(cWords):
vraw = getVec(mats, i, upbound, offs, cWords) #get co-occurrence vector
#PMI score
totalOccs = np.array([countDict[itow[iw]] for iw in range(vraw.shape[0])])
px = np.clip(countDict[itow[i]]*nScale, 1e-10, 1)
py = np.clip(totalOccs *nScale, 1e-10, 1)
pxy = np.clip(vraw *nScale, 1e-30, 1)
npmi= (np.log(pxy/(px*py)) / -np.log(pxy))
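# Added note (not in the original source): this is normalized PMI,
# npmi(x, y) = pmi(x, y) / -log p(x, y), which rescales association strength into [-1, 1].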
def removeRare(l, distr): #if words occur very rarely, co-occurrence can be coincidental ->
pCurr = distr* (totalOccs >= l)
vres = np.flip(np.sort(pCurr)[-Config.overviewMaxMissAssociations:], axis=0)
ires = np.flip(np.argsort(pCurr)[-Config.overviewMaxMissAssociations:], axis=0)
return list(zip(ires, vres))
assDict[i] = {"All":removeRare(30, npmi)}
assDictAbs[i] = {"All": removeRare(30, pxy)}
#print(itow[i],countDict[itow[i]],vraw )
#print("Percentiles of Occ: Top 20%, Top 80%",np.percentile(totalOccs, 20),np.percentile(totalOccs, 80))
fend = "_win" if wincoocc else "_doc"
with open(Config.fpath + Config.assSurname + fend + Config.fending +".pic", "wb") as f: pickle.dump(assDict, f)
with open(Config.fpath + Config.assAbsname + fend + Config.fending +".pic", "wb") as f: pickle.dump(assDictAbs, f)
with open(Config.fpath + Config.coiTowname + Config.fending + ".pic","wb") as f: pickle.dump(itow, f)
def generateCooccVecs(Config):
with open(Config.fpath + cooccWwithFreqname, "rb") as f: sorted_words = pickle.load(f)
#countDict = {k: v for k, v in sorted_words}
wtoi = {w[0]: i for i, w in enumerate(sorted_words)}
mats = loadCooc(Config,False)
cWords = min(Config.nWords, len(wtoi))
upbound, offs = getDataTypeRanges(sorted_words)
import scipy.stats
coen=[]
for i in range(cWords):
vraw=getVec(mats,i,upbound,offs,cWords) #get co-occurrence vector
cou=np.sum(vraw)+1e-20
ent=scipy.stats.entropy(vraw/cou)
coen.append((cou,ent))
with open(Config.fpath + Config.cooccVecs + Config.fending + ".pic", "wb") as f: pickle.dump(coen, f)
#load all nodes and edges to plot a graph
# def loadgraphDat():
# imat=np.load(fpath+Config.igname+fending+".npy")
# amat = np.load(fpath + Config.agname + fending + ".npy")
# vamat = np.load(fpath + Config.vagname + fending + ".npy")
# vmat = np.load(fpath + Config.vgname+fending+".npy")
# with open(fpath+Config.coiTowname,"rb") as f: coitow=pickle.load(f)
# return imat,vmat,itow,amat,vamat
if __name__ == '__main__':
#print("Reading jobads - limit", Config.nDoc)
# import jobads
# docs = jobads.readJobAds(nDoc)[:500]
# sdocs=[d.split(" ") for d in docs]
# sdocs=[[w for w in d if len(w)>1] for d in sdocs]
# getCoocc(sdocs)
from Config import Cfg
import pickle as pl
Conf = Cfg()
def printDict(rawName, samples=5):
fname = Conf.fpath + rawName + Conf.fending + ".pic"
with open(fname, "rb") as f:
data = pl.load(f)
return data
wToL = printDict(Conf.wToLemmaname)
#wcounts = printDict(Conf.wcountsname)
#perAd = printDict(Conf.perAdOccname)
#compwords = printDict(Conf.compoundsname)
coitow = printDict(Conf.coiTowname)
phrl = printDict(Conf.longwordPhrname, samples=0)
phrlDoc = printDict(Conf.longwordPhrDocsname, samples=0)
assDictWin = printDict(Conf.assSurname + "_win", samples=0)
assAbsDictWin = printDict(Conf.assAbsname + "_win", samples=0)
assDictDoc = printDict(Conf.assSurname + "_doc", samples=0)
assAbsDictDoc = printDict(Conf.assAbsname + "_doc", samples=0)
```
#### File: FrontEnd/vis_old/webAppMain.py
```python
import dash
import json
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from plotly.graph_objs import *
import networkx as nx
import numpy as np
import pickle
import hashlib
import re, string, nltk
from Config import * #own libs
import textTools
import FrontEnd.vis_old.toolsVisual as wtools
import FrontEnd.vis_old.topicVisual as aT
#There are 3 representations of a word with the same stem: 1) wmf: most frequent word form (e.g. house out of houses, House) 2) wt: transformed word (no plural, stemmed) 3) ws: word as given in the syllabus
#There are transformers from 1->2
#Possible improvement: check if an upper-case or fully upper-case form of the word exists; this fixes Python vs python and sql vs SQL
#load stored data
with open(dataPath+"longwordPhr"+fending+".pic", "rb") as f: longPhrases = pickle.load(f)
with open(dataPath+"biterms"+fending+".pic", "rb") as f: biterms = pickle.load(f)
with open(dataPath+"wcounts"+fending+".pic", "rb") as f: wCounts = pickle.load(f)
#with open(dataPath+"oWordMap.pic","rb") as f: oWordMap=pickle.load(f)
#global settings
initWords = [""]*maxW
unitWeights = True #use the same weight for all edges
initMap = [str(i) for i in range(maxT)]
initTop = aT.getTopTxt("")+initWords
#Overview of Webpage
app = dash.Dash()
#for icon not working from: https://community.plot.ly/t/including-page-titles-favicon-etc-in-dash-app/4648/8
#server = app.server
#@server.route('/favicon.ico')
#def favicon():
# return flask.send_from_directory(os.path.join(server.root_path, 'static'),
# 'favicon.ico')
app.title = 'CUUL'
app.layout = html.Div([
html.H2("Course Syllabus"),
#html.Div(className='divtextA', children=dcc.Textarea(id='textA', placeholder='Enter a value...', value='supervision supervising',style={'width': '100%'})),#'Python and R is all we teach. But Big data matters, too, and also quick data queries using sql! Soft skills like communication and team work should also be taught.',style={'width': '100%'})),
html.Div(className='divtextA', children=dcc.Textarea(id='textA',placeholder='Enter a value...',value='R SQL Python',style={'width': '100%'})),
#----Topics Part-----
html.H2("Topic Analysis of Job Ads"),
html.P("Show topics and their importance/weight; Click button to compute difference in topics of syllables and Job ads; Click Topic to see some info about it"),
html.Button(id='topJobButton', children='Sort topics by job ads relevance', n_clicks=0),
html.Button(id='topButton', children='Sort by difference "syllables - job ads"', n_clicks=0),
html.Div(id='butTText', children=[html.Button(id="ButT" + str(i), children=initTop[i], n_clicks=0,style={'white-space': 'normal', 'width': '200px', 'color': 'grey','fontSize': 15, 'padding': 5,'border': '0.5px solid rgba(0,0,0,0.1)', 'background': 'none'}) for i in range(maxT)]), #
html.Div(id='cacheB', children='tjb:0 tb:0 last:nan', style={'display': 'none'}),
html.Div(id='Word graphsT', children=[dcc.Graph(id="gr2"), html.Br()]),
html.Div(id='Word barT', children=[dcc.Graph(id="gr3"), html.Br(), html.P('Matching Job Ads', style={'font-weight': 'bold', 'color': 'grey', 'fontSize': 18})]),
#----Association Part-----
html.H2("Association Analysis"),
html.Button(id='sylButton', children='Show Overview', n_clicks=0),
html.P("Find words not in sylables that are associated with it"),
html.Div(id='butWText', children=[html.Button(id="ButW"+str(i), children=" ", n_clicks=0, style={'color': 'grey', 'fontSize': 15, 'padding': 5, 'border': 'none', 'background': 'none'}) for i in range(maxW)]), #
html.Br(),
html.Div(id='Word graphsW', children=[dcc.Graph(id="gr1"), html.Br(), html.P('Matching Job Ads', style={'font-weight':'bold', 'color': 'grey', 'fontSize': 18})]),#,figure=fig)
html.Div(id='cacheW', children=[html.P(','.join(["0"]*maxW)), html.P(','.join([""]*maxW))], style={'display': 'none'}),
html.Div(id='dummyOut', style={'display': 'none'}),
html.Div(id='cacheT', children=[html.P(','.join(["0"] * maxT)), html.P(','.join([""] * maxT))], style={'display': 'none'})
#, html.Div(id='cache2',children=[html.P(','.join(initMap))], style={'display': 'none'})
])
#---------------------------------
#Associationgraph Functionality
#---------------------------------
def getSingleWordGraphs(win, currW): #get graph of a single word w
G = nx.Graph()
G.add_node(win) #displayed word
tag = textTools.get_wordnet_pos(nltk.pos_tag([win])[0][1])
wt = textTools.changeWord(win, tag) #internal reference
wnodes = {}
if wt in wtools.wtoi: #if the word is not part of job ads, the graph will be just one word
ids = wtools.imat[wtools.wtoi[wt], :singleWordMaxAssociations] #get a limited number of related words
for id, weight in zip(ids, wtools.vmat[wtools.wtoi[wt], :singleWordMaxAssociations]):
G.add_node(wtools.itow[id])
G.add_edge(wt, wtools.itow[id], weight=1 if unitWeights else weight)
wnodes[wtools.itow[id]] = weight #the weight of a node is given by how much it is associated with the term
wnodes[wt] = max(wnodes.values()) if len(wnodes) else 1 #the center word should be biggest
return wtools.getStyledGraph(G, currW, wnodes, tree=False) #wtools.oWordMap
dummyGraph,dummyanno = getSingleWordGraphs("EmptyValue", [])
def getMultiWordsOverview(ws):
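# Build the overview grid: for every query word found in the job-ad vocabulary, collect its most strongly associated words that are not themselves query words, one row per query word with the query word placed in the middle of its row.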
nMatchEdges = overviewMaxMissAssociations#3*overviewMaxMissAssociations
edges = {}; wnodes = {}
matchwords = []
for cw in ws:
if cw in wtools.wtoi: matchwords.append(cw)
elif cw.lower() in wtools.wtoi: matchwords.append(cw.lower())
else: continue
griddata=[]
for w in matchwords: #add for each words strongest missing associations
if not w in wnodes: #add each word only once
wnodes[w] = np.mean(wtools.vamat[wtools.wtoi[w], :nMatchEdges])
cgrid = []
ids = wtools.amat[wtools.wtoi[w], :] # overviewMaxMissAssociations]
missFound = 0
#nEdges = 0
# wadd=[]
for ni, (id, weight) in enumerate(zip(ids, wtools.vamat[wtools.wtoi[w], :])): # overviewMaxMissAssociations]):
nodeID = wtools.itow[id]
while(nodeID in wnodes): nodeID+=" " #a missing word has already occurred, we duplicate it here by changing its ID
if nodeID not in matchwords and (missFound < overviewMaxMissAssociations):
missFound += 1
if wtools.itow[id] not in wtools.wtoi: wnodes[nodeID] = weight / 2
else: wnodes[nodeID] = np.mean(wtools.vamat[wtools.wtoi[wtools.itow[id]], :])
cgrid.append((nodeID if weight>0 else "NotFound", wnodes[nodeID]))
# if ni==nMatchEdges//2:
# cgrid.append((w, np.mean(wtools.vamat[wtools.wtoi[w], :nMatchEdges])))
# G.add_node(itow[id]) #duplicates are ignored by networkx #G.add_edge(w,itow[id],weight=weight)
eid = w + esep + nodeID # if w<itow[id] else itow[id]+esep+w
edges[eid] = 1 if unitWeights else weight
#nEdges += 1
#if nEdges >= nMatchEdges: break
while(len(cgrid)<overviewMaxMissAssociations): cgrid.append(("NotFound",0))
cgrid.insert(overviewMaxMissAssociations // 2, ((w, np.mean(wtools.vamat[wtools.wtoi[w], :nMatchEdges]))))
griddata.append(cgrid)
# if w in list(matchwords)[:15]: print(w,wadd)
allw = sorted(wnodes.items(), key=lambda x: x[1], reverse=True)
mw = [x[0] for x in allw[:maxDrawNodes]]
mw = set(mw).union(matchwords)
#for w in mw: G.add_node(w) # duplicates are ignored by networkx
wnodes={k: v for k, v in wnodes.items() if k in mw}
if griddata is None or len(griddata) == 0:
global dummyGraph
return dummyGraph
else:
return wtools.getGridOverviewGraphs(ws, wnodes, griddata) #wtools.oWordMap
#Analyze syllabus -> Handle click
@app.callback(Output('butWText', 'children'), [Input('sylButton', 'n_clicks')], [State('textA', 'value')]) #,[Input('testSubmit', 'id'),Input('textA', 'value')]
def display_graphs(id, text):
graphs = []
global biterms
rtext = text.replace("\n", " ").replace("\t", " ").replace(" ", " ").replace(" ", " ")
words = textTools.docSpacePunctuation(rtext).split(" ")
words = words+initWords
fwords = []
lw = words[0] if len(words) else ""
doskip = False
for w in words[1:]:
bt = lw.lower() + textTools.bsep + w.lower()
if bt in biterms:
fwords.append(bt)
doskip = True
else:
if not doskip: fwords.append(lw)
doskip = False
lw = w
currW = fwords[:maxW]
allt = "".join(currW)
hash_object = hashlib.md5(allt.encode()).hexdigest()
wtools.logActivity(str(hash_object) + ","+text)
for iw, w in enumerate(currW):
if len(w):
#chw, chwMap, dw = textTools.transWord(w, wtools.oWordMap)
chw, dw = textTools.transWord(w)#, wtools.oWordMap)
wInAds = (dw in wtools.wtoi or chw.lower() in wtools.wtoi or chw in wtools.wtoi)#chwMap.lower() in wtools.wtoi or chwMap in wtools.wtoi or
#print(w, chw, dw, wInAds)
ccol = 'grey' if (len(chw) == 0 and not wInAds) else ('lightgreen' if wInAds else 'red') #green if occurs, grey if stopword or so, red if not occurring
graphs.append(html.Button(id="ButW"+str(iw), children=w, n_clicks=0, style={'font-weight': 'bold', 'color': ccol, 'fontSize': 14, 'padding': 4 if len(w) else 0, 'border': '0.5px solid rgba(0,0,0,0.1)' if len(w) else 'none', 'background': 'none'}))
return graphs
#Analyze a word -> Handle click on word button
@app.callback(dash.dependencies.Output('Word graphsW', 'children'), [Input('ButW'+str(i), 'n_clicks') for i in range(maxW)], [State('ButW'+str(i), 'children') for i in range(maxW)]+[State('cacheW', 'children')]) #textContent
def update_output(*args):
global dummyGraph, wordPhrases#,biterms
clicks = args[:maxW]
currW = args[maxW:2*maxW]
caChildren = args[2*maxW]
lclicks = [int(v) for v in caChildren[0]['props']['children'].split(",")]
savedWords = caChildren[1]['props']['children']
found = False
tjobads = [html.P('Matching Job Ads', style={'font-weight':'bold','color': 'grey', 'fontSize': 18})]
def handleSingleWord(word):
#chw, chwMap, dw = textTools.transWord(word, wtools.oWordMap)
chw, dw = textTools.transWord(word)#, wtools.oWordMap)
G, anno = getSingleWordGraphs(chw, currW)#(chwMap, currW)
tit = "Associations with <b>" + dw + "</b>"
wordBag = [chw, dw]#chwMap, dw]
#print(chwMap, chwMap in longPhrases, dw in longPhrases, chw in longPhrases, word in longPhrases, len(longPhrases))
matches = np.isin(wordBag, list(longPhrases.keys()))
if np.any(matches):#if dw in wordPhrases:
wInLong = wordBag[np.where(matches)[0][0]]
for iad, ad in enumerate(longPhrases[wInLong]):#wordPhrases[dw]):
# tjobads.append(html.P(str(iad)+"..."+ad+"...", style={'color': 'grey', 'fontSize': 15}))##dcc.Markdown("..."+ad+"**...**"))
tjobads.append(dcc.Markdown(str(iad) + " ..." + ad.replace(dw, " **_" + word + "_** ").replace(dw.title(), " **_" + dw.title() + "_** ") + "...")) ##dcc.Markdown("..."+ad+"**...**"))
return G, anno, tit
if len(currW) == 1: #just 1 word
G, anno, tit = handleSingleWord(currW[0])
found = True
elif savedWords == ",".join(currW):
for i, (l1, l2) in enumerate(zip(lclicks, clicks)):
if l1 < l2:
G, anno, tit = handleSingleWord(currW[i])
found = True
break
if not found:
if len(currW) == 0 or (len(currW[0]) == 0 and len(currW[1]) == 0):
G, anno = (dummyGraph, dummyanno)
else:
taggedtext= nltk.pos_tag([t for t in currW if len(t)>0])
taggedtext=[(w, textTools.get_wordnet_pos(t)) for w, t in taggedtext]
tWords = [textTools.changeWord(w, t) for w, t in taggedtext] # tWords = [oWordMap.get(w, "") for w in tWords]
#print("tW",tWords)
#print("cW",currW)
tWords = [t for t in tWords if len(t) > 0]
res = getMultiWordsOverview(tWords)
G, anno = (dummyGraph, dummyanno) if res is None else res
tit = "Overview"
height = 1000 if found else max(int(10+len(tWords)*50), 225)
fig = Figure(data=Data([G[0], G[1]]), layout=Layout(title='<br>'+tit, annotations=anno,
titlefont=dict(size=14), showlegend=False, hovermode='closest', height=height, margin=dict(b=20, l=5, r=5, t=20),
xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False), yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False)))
allt = "".join(currW)
hash_object = hashlib.md5(allt.encode()).hexdigest()
wtools.logActivity(str(hash_object) + ","+tit+","+str(sum(clicks)))
return [dcc.Graph(id="gr1", figure=fig), html.Br()]+tjobads
@app.callback(dash.dependencies.Output('cacheW','children'),[Input('Word graphsW','children'),Input('butWText', 'children')],[State('cacheW','children')]+[State('ButW'+str(i), 'n_clicks') for i in range(maxW)]+[State('ButW'+str(i), 'children') for i in range(maxW)]) #textContent
def update_output2(*args):
caChildren = args[1]
buttonProps = [w['props']['children'] for w in caChildren]
savedWords = ",".join(buttonProps)
currW = ",".join([str(c) for c in args[-maxW:]])
clicks = args[-2*maxW:-maxW] if savedWords == currW else [0]*maxW
return [html.P(",".join([str(c) for c in clicks])), html.P(currW)]
@app.callback(dash.dependencies.Output('dummyOut', 'children'),[dash.dependencies.Input('gr1', 'hoverData')],[State('ButW'+str(i), 'children') for i in range(maxW)])
def update_text(*args):
hoverData=args[0]
if hoverData is not None and "points" in hoverData:
if len(hoverData["points"])>0 and "text" in hoverData["points"][0]:
currW = args[1:]
allt = "".join(currW)
hash_object = hashlib.md5(allt.encode()).hexdigest()
wtools.logActivity(str(hash_object) + ","+"hoverJson," + json.dumps(hoverData["points"][0]["text"][:100]))
return [html.P("a")]
#--------------------------
#Topic functionality (could go into own file)
#---------------------------
def getAllTopicGraphs(tWords, sortByDiff=False): #Empty means get standard
graphs = []
topTxt = aT.getTopTxt(tWords, sortByDiff)+initWords ##chw,dw=textTools.transWord(w,wtools.oWordMap)
for iw, w in enumerate(topTxt[:maxT]):
#[ for i in range(len(initTop))]),
#ccol= 'grey' if len(chw)==0 else ('lightgreen' if dw.lower() or dw in wtools.wtoi else 'red') #green if occurs, grey if stopword or so, red if not occurring
graphs.append(html.Button(id="ButT" + str(iw), children=w, n_clicks=0, style={'white-space': 'normal', 'width': '200px', 'color': 'grey','fontSize': 15, 'padding': 5 if len(w) else 0,'border': '0.5px solid rgba(0,0,0,0.1)' if len(w) else 'none', 'background': 'none'}))
return graphs
def display_TopicGraphs(id, text, sortbyDiff=False):
global biterms
fwords = []
lines = text.split("\n")
for l in lines:
sentences = textTools.tokenizer.tokenize(l)
for sen in sentences:
#sen=sen.replace(" ")
sen = re.sub('['+string.punctuation+']', '', sen)
words = textTools.docSpacePunctuation(sen).split(" ") #split into tokens
lw = words[0] if len(words) else ""
doskip = False
for w in words[1:]:
bt = lw.lower() + textTools.bsep + w.lower()
if bt in biterms:
fwords.append(bt)
doskip = True
else:
if not doskip: fwords.append(lw)
doskip = False
lw = w
currW = (fwords+initWords)[:maxW]
allt = "".join(currW)
hash_object = hashlib.md5(allt.encode()).hexdigest()
wtools.logActivity(str(hash_object) + ", TopicGraph,"+text)
taggedtext = nltk.pos_tag([t for t in currW if len(t) > 0])
taggedtext = [(w, textTools.get_wordnet_pos(t)) for w, t in taggedtext]
tWords = [textTools.changeWord(w, t) for w, t in taggedtext]
return getAllTopicGraphs(tWords, sortbyDiff)
#Topic Stuff: Analyze syllabus -> Handle click on analyze
@app.callback(Output('butTText', 'children'), [Input('cacheB', 'children')], [State('textA', 'value')]) #,[Input('testSubmit', 'id'),Input('textA', 'value')]
def display_diffGraphsSorted(*args):
prev_clicks = dict([i.split(':') for i in args[0].split(' ')])
text = args[1]
if (prev_clicks['last'] == 'tjb'):
return display_TopicGraphs(prev_clicks['tjb'], text, False)
elif (prev_clicks['last'] == 'tb'):
return display_TopicGraphs(prev_clicks['tb'], text, True)
else:
return display_TopicGraphs(0, text, False)
@app.callback(dash.dependencies.Output('cacheB', 'children'), [Input('topJobButton', 'n_clicks'), Input('topButton', 'n_clicks')], [State('cacheB', 'children')])
def update_Topbutton(tjb_clicks, tb_clicks, prev_clicks):
prev_clicks = dict([i.split(':') for i in prev_clicks.split(' ')])
last_clicked = 'nan'
if tjb_clicks > int(prev_clicks['tjb']):
last_clicked = 'tjb'
elif tb_clicks > int(prev_clicks['tb']):
last_clicked = 'tb'
cur_clicks = 'tjb:{} tb:{} last:{}'.format(tjb_clicks, tb_clicks, last_clicked)
return cur_clicks
@app.callback(dash.dependencies.Output('Word graphsT','children'),[Input('ButT'+str(i), 'n_clicks') for i in range(maxT)],[State('cacheT','children')]+[State('ButT'+str(i), 'children') for i in range(maxT)]) #,State('cache2','children')
def update_Topoutput(*args):
global dummyGraph,wordPhrases,biterms
found = False
caChildren = args[maxT]
lclicks = [int(v) for v in caChildren[0]['props']['children'].split(",")]
#orderChildren = args[maxT+1]
#topOrder = [int(v) for v in orderChildren[0]['props']['children'].split(",")]
for i,(l1,l2) in enumerate(zip(lclicks,args[:maxT])):
if l1!=l2:
# iTo=topOrder[i]
butT=args[maxT+1+i] #Text of button
iTo=int(butT[1:].split(" ")[0])
G,anno=aT.getTopicGraphs(iTo)
tit = "Topic <b>" +str(iTo)+"</b>"
found=True
break
if not found:
G, anno = aT.getTopicGraphs(0)
tit = "Topic <b>" + str(0) + "</b>"
fig= Figure(data=Data([G[0],G[1]]),layout=Layout(title='<br>'+tit,annotations=anno,
titlefont=dict(size=14), showlegend=False, hovermode='closest',height= 800,width=800,margin=dict(b=20,l=5,r=5,t=20),
xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False), yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False)))
return [dcc.Graph(id="gr2",figure=fig),html.Br()]
@app.callback(dash.dependencies.Output('Word barT','children'),[Input('ButT'+str(i), 'n_clicks') for i in range(maxT)],[State('cacheT','children')]+[State('ButT'+str(i), 'children') for i in range(maxT)]) #,State('cache2','children')
def update_Topoutput3(*args):
global dummyGraph,wordPhrases,biterms
found = False
caChildren = args[maxT]
lclicks = [int(v) for v in caChildren[0]['props']['children'].split(",")]
#orderChildren = args[maxT+1]
#topOrder = [int(v) for v in orderChildren[0]['props']['children'].split(",")]
for i,(l1,l2) in enumerate(zip(lclicks,args[:maxT])):
if l1!=l2:
# iTo=topOrder[i]
butT=args[maxT+1+i] #Text of button
iTo=int(butT[1:].split(" ")[0])
B = aT.getTopicBar(iTo)
tit = "Topic <b>" +str(iTo)+"</b>"
found=True
break
if not found:
B = aT.getTopicBar(0)
tit = "Topic <b>" + str(0) + "</b>"
fig= Figure(data=[B],layout=Layout(title='<br>'+tit,
titlefont=dict(size=14), showlegend=False, hovermode='closest', height= 400,width=800,margin=dict(b=20,l=100,r=20,t=50),
xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=True), yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=True)))
return [dcc.Graph(id="gr3",figure=fig),html.Br()]
@app.callback(dash.dependencies.Output('cacheT','children'),[Input('Word graphsT','children'),Input('butTText', 'children')],[State('cacheT','children')]+[State('ButT'+str(i), 'n_clicks') for i in range(maxT)]) #textContent
def update_Topoutput2(*args):
caChildren=args[1]
buttonProps=[w['props']['children'] for w in caChildren]
# savedWords=",".join(buttonProps)
clicks = args[-maxT:]
return [html.P(",".join([str(c) for c in clicks]))]
if __name__ == '__main__':
app.run_server(debug=True)
``` |
{
"source": "joshand/clientsim",
"score": 2
} |
#### File: client_sim/templatetags/extras.py
```python
import json
from django import template
register = template.Library()
@register.filter
def pretty_json(value):
try:
j = json.dumps(json.loads(value), indent=4)
except Exception:
j = value
return j
```
#### File: clientsim/scripts/clean_tasks.py
```python
from scripts.dblog import *
# from django_apscheduler.models import DjangoJobExecution
def cleanup():
# DjangoJobExecution.objects.delete_old_job_executions(3600)
log = []
task_types = ["client_monitor", "cloud_monitor", "interface_monitor", "network_monitor"]
for t in task_types:
tasks = Task.objects.filter(description=t)[:25].values_list("id", flat=True)
Task.objects.filter(description=t).exclude(pk__in=list(tasks)).delete()
append_log(log, "task_cleanup::", t)
t1, _ = Task.objects.get_or_create(
description="task_cleanup"
)
t1.task_data = "\n".join(log)
t1.last_update = make_aware(datetime.datetime.now())
t1.save()
def run():
cleanup()
```
#### File: clientsim/scripts/client_monitor.py
```python
import docker
from docker import types
# from apscheduler.schedulers.background import BackgroundScheduler
from client_sim.models import *
from django.conf import settings
from django.utils.timezone import make_aware
from io import BytesIO
from django.db.models import F
from scripts.dblog import *
import traceback
import sys
# def dolog(fn, step, *txt):
# l = Log.objects.create(function=fn, step=step, log=",".join(map(str, txt)))
# l.save()
def check_parse_files(dockerfile, cmdout):
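# Replace {<filename>} placeholders in the Dockerfile with download URLs built from the ServerSetting base URL and the matching Upload record; returns None if a referenced file cannot be found.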
ss = ServerSetting.objects.all()
if len(ss) != 1:
print("No ServerSetting defined (or multiple defined, which is not allowed). Not fixing up dockerfile...")
return dockerfile
df = dockerfile[:]
# df = df.replace("{{workdir}}", str(os.path.dirname(os.path.realpath("manage.py"))) + "/upload")
df.replace("COPY", "ADD")
while df.find("{<") != -1:
f_start = df.find("{<")
f_end = df.find(">}", f_start)
fn = df[f_start + 2:f_end]
upload = Upload.objects.filter(file__endswith=fn)
if len(upload) != 1:
cmdout += "Unable to locate file '" + fn + "' in Uploads!"
return None
url = (ss[0].baseurl + "file/" + str(upload[0].id))
df = df.replace("{<" + fn + ">}", url)
return df
def create_docker_containers(client, containers, log, delete_existing=False):
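# Create (or re-create, when delete_existing is set) the Docker container for each Client record: PUBLISHED containers run straight from their image path, DOCKERFILE containers are built from the stored Dockerfile first, and the resulting container id and sync timestamps are written back onto the Client.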
for c in containers:
cmdout = ""
newcli = None
if delete_existing:
# dolog("sync_docker_containers", "deleting_existing_container", c.clientid, c)
cmdout += "Deleting existing container " + str(c.clientid) + "\n"
try:
ret = client.containers.get(c.clientid).remove(force=True)
except Exception as e:
# dolog("sync_docker_containers", "deleting_existing_container", "error", e)
cmdout += "Exception " + str(e) + "\n"
c.force_rebuild = False
c.last_deployed_hash = None
c.clientid = None
c.skip_sync = True
c.save()
try:
if c.container.containertype.name == "PUBLISHED":
if c.network:
newcli = client.containers.run(c.container.path, c.container.cmd, network=c.network.dockernetwork(),
mac_address=c.macaddress, hostname=c.hostname,
name=c.dockercontainername(), detach=True, tty=True, remove=True)
elif c.bridge:
newcli = client.containers.run(c.container.path, c.container.cmd, network=c.bridge.dockernetwork(),
mac_address=c.macaddress, hostname=c.hostname,
name=c.dockercontainername(), detach=True, tty=True, remove=True)
# dolog("sync_docker_containers", "create_new_container_published", newcli)
cmdout += "New Published Container " + str(newcli) + "\n"
elif c.container.containertype.name == "DOCKERFILE":
df = str(c.container.get_dockerfile()) + "\n"
df = check_parse_files(df, cmdout)
f = BytesIO(df.encode('utf-8'))
try:
base_path = str(os.path.dirname(os.path.realpath("manage.py"))) + "/upload"
client2 = docker.APIClient(base_url='unix://var/run/docker.sock')
response = [line for line in client2.build(
fileobj=f, rm=True, tag=c.container.buildcontainername, path=base_path
)]
# newimg = client.images.build(fileobj=f, custom_context=True, tag=c.container.buildcontainername)
# dolog("sync_docker_containers", "create_new_image", "success", response)
# print("DOCKERFILE", c.container, str(response))
cmdout += "New Build Image " + str(response) + "\n"
except Exception as e:
# print(sys.exc_info()[-1].tb_lineno, "\n", sys.exc_info())
# dolog("sync_docker_containers", "create_new_image", "error", e)
# print(traceback.print_exc())
append_log(log, "sync_docker::create_docker_containers::exception re-creating container...", e,
traceback.print_exc())
cmdout += "Build Image Exception " + str(e) + "\n"
try:
if c.bridge:
net = c.bridge.dockernetwork()
else:
net = c.network.dockernetwork()
try:
port_bindings = json.loads(str(c.portbind))
except Exception as e:
append_log(log, "sync_docker::create_docker_containers::error reading portbinding json")
port_bindings = {}
# print(c.container.buildcontainername, c.container.cmd, net, c.portbind, c.macaddress, c.hostname, c.dockercontainername())
if c.container.cmd:
newcli = client.containers.run(c.container.buildcontainername, c.container.cmd,
network=net, ports=port_bindings,
mac_address=c.macaddress, hostname=c.hostname,
name=c.dockercontainername(),
detach=True, tty=True, remove=True)
else:
newcli = client.containers.run(c.container.buildcontainername,
network=net, ports=port_bindings,
mac_address=c.macaddress, hostname=c.hostname,
name=c.dockercontainername(),
detach=True, tty=True, remove=True)
# dolog("sync_docker_containers", "create_new_container_built", "success", newcli)
cmdout += "New Build Container " + str(newcli) + "\n"
except Exception as e:
# print(sys.exc_info()[-1].tb_lineno, "\n", sys.exc_info())
# dolog("sync_docker_containers", "create_new_container_built", "error", e)
append_log(log, "sync_docker::create_docker_containers::exception re-creating container...", e,
traceback.print_exc())
cmdout += "Build Container Exception " + str(e) + "\n"
except Exception as e:
# print(sys.exc_info()[-1].tb_lineno, "\n", sys.exc_info())
append_log(log, "sync_docker::create_docker_containers::exception re-creating container...", e, sys.exc_info())
# print(c, c.container.path, c.container.cmd, newcli)
if newcli:
# ipaddr = newcli.attrs['NetworkSettings']['IPAddress']
# if ipaddr is None or ipaddr == "":
# ipaddr = newcli.attrs['NetworkSettings']['Networks'][c.network.dockernetwork()]['IPAddress']
# c.ipaddress = ipaddr
dt = make_aware(datetime.datetime.now())
c.clientid = newcli.id
c.last_sync = dt
c.last_update = dt
c.skip_sync = True
c.last_sync_log = str(cmdout)
c.save()
def sync_container_ips(containers, log):
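# Look up each client's running container and write its current IP address back onto the Client record; clears the stored container id if Docker no longer knows it.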
client = docker.from_env()
for c in containers:
try:
newcli = client.containers.get(c.clientid)
except:
append_log(log, "sync_docker::create_docker_containers::exception getting client id... clearing client id from db")
c.clientid = None
c.save()
return None
ipaddr = newcli.attrs['NetworkSettings']['IPAddress']
if ipaddr is None or ipaddr == "":
if c.network:
ipaddr = newcli.attrs['NetworkSettings']['Networks'][c.network.dockernetwork()]['IPAddress']
else:
ipaddr = newcli.attrs['NetworkSettings']['Networks'][c.bridge.dockernetwork()]['IPAddress']
# print("containerip", c.clientid, ipaddr)
c.ipaddress = ipaddr
c.skip_sync = True
c.save()
def sync_docker_clients():
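# Reconcile the Client table with Docker: import containers Docker knows but the DB does not, create containers for active clients without one, rebuild out-of-sync or force_rebuild clients, refresh container IPs, then push any changed startup scripts into the containers.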
docker_container_list = []
log = []
client = docker.from_env()
try:
dconts = client.containers.list()
except Exception as e:
append_log(log, "sync_docker_clients::exception getting Docker client list::is Docker installed and running?::", e)
db_log("client_monitor", log)
return ""
append_log(log, "sync_docker_networks::full_docker_network_list::", dconts)
# First, check to see if all relevant Docker clients exist in the database. If not, import them.
for dn in dconts:
docker_container_list.append(dn.id)
containers = Client.objects.filter(clientid__iexact=dn.id)
if len(containers) <= 0:
append_log(log, "import_docker_containers_into_db", dn.id)
dt = make_aware(datetime.datetime.now())
Client.objects.create(clientid=dn.id, description="Imported from Docker", active=False, last_sync=dt, last_update=dt)
# clear clientid for any container in db that doesn't actually exist in Docker
clients = Client.objects.exclude(clientid__in=docker_container_list).update(clientid=None)
# Second, check to see if there are any clients in database that do not exist in Docker
conts = Client.objects.filter(clientid__isnull=True).filter(active=True)
# print("missing clientid=", conts)
append_log(log, "sync_docker::create_docker_containers::phase_1", conts)
create_docker_containers(client, conts, log)
# Last, see if any clients have been updated and need to be re-synced
conts = Client.objects.all().exclude(last_sync=F('last_update'))
# print("out of sync=", conts)
append_log(log, "sync_docker::create_docker_containers::phase_2", conts)
create_docker_containers(client, conts, log, delete_existing=True)
# Next, check to see if there are any clients in database that are tagged with 'force_rebuild'
conts = Client.objects.filter(force_rebuild=True)
# print("force_rebuild=", conts)
append_log(log, "sync_docker::create_docker_containers::phase_3", conts)
create_docker_containers(client, conts, log, delete_existing=True)
# Sync Container IPs
sync_container_ips(Client.objects.all(), log)
# Next, send script to client
conts = Client.objects.filter(clientid__isnull=False)
for c in conts:
cmdout = ""
if c.dockercontainerscripthash() != "":
if (str(c.last_deployed_hash) != str(c.dockercontainerscripthash())) or c.force_script:
append_log(log, "sync_docker::create_docker_containers::script_deployment", c)
c.force_script = False
c.skip_sync = True
c.save()
scr = c.dockercontainerscript()
scr = scr.replace("\r\n", "{{br}}").replace("\n", "{{br}}").replace("\r", "{{br}}").replace("{{br}}", "\n")
# .replace("\r\n", "{{br}}").replace("\n", "{{br}}").replace("\r", "{{br}}").replace("{{br}}", "\\n")
cmd = "bash -c \"echo '" + scr + "' > ~/script.sh\""
start = "bash /root/script.sh &"
# print(cmd)
try:
cmd_restart = client.containers.get(c.clientid).restart()
append_log(log, "sync_docker::container_restart", cmd_restart, "::next_cmd::" + cmd)
# dolog("sync_docker::client_monitor", "script_update", "cont_restart", cmd_restart)
cmdout += "Restart Container: " + str(cmd_restart) + "\n"
cmd_res = client.containers.get(c.clientid).exec_run(cmd)
cmdout += "Execute Command: " + str(cmd) + "\n"
append_log(log, "sync_docker::create_docker_containers::code_deploy", cmd_res)
cmdout += "Command Result: " + str(cmd_res) + "\n"
# if cmd_res.exit_code != 0:
# dolog("client_monitor", "script_update", "error", cmd, cmd_res)
# else:
# dolog("client_monitor", "script_update", "success", cmd, cmd_res)
start_res = client.containers.get(c.clientid).exec_run(start, detach=True)
append_log(log, "sync_docker::create_docker_containers::script_start", start_res)
cmdout += "Start Script Result: " + str(cmd_res) + "\n"
# if start_res.exit_code != 0:
# dolog("client_monitor", "script_update", "error", start, start_res)
# else:
# dolog("client_monitor", "script_update", "success", start, start_res)
c.last_deployed_hash = c.dockercontainerscripthash()
c.skip_sync = True
c.last_sync_log = str(cmdout)
c.save()
append_log(log, "sync_docker::create_docker_containers::done")
except Exception as e:
append_log(log, "sync_docker::script_update::exception", e)
# dolog("sync_docker_containers", "updating_container_script", "error", e)
cmdout += "Exception: " + str(e) + "\n"
else:
if c.force_script:
c.force_script = False
c.skip_sync = True
c.save()
# else:
# print("no script update", str(c.last_deployed_hash), str(c.dockercontainerscripthash()))
db_log("client_monitor", log)
def delete_container(container_id):
client = docker.from_env()
log = []
clients = Client.objects.filter(id=container_id)
for n in clients:
if n.clientid:
client = client.containers.get(n.clientid)
append_log(log, "delete_docker_container", n.clientid, client.name)
client.remove()
else:
append_log(log, "delete_docker_client::No clientID. Doesn't exist?")
db_log("client_monitor", log)
def get_container_logs(container_id):
client = docker.from_env()
clients = Client.objects.filter(id=container_id)
for n in clients:
if n.clientid:
client = client.containers.get(n.clientid)
logs = client.logs()
return logs.decode("UTF-8").replace("\r\n", "<br>").replace("\n", "<br>")
return "Error: Unable to get logs for client. Is it running?"
def run():
sync_docker_clients()
```
#### File: clientsim/scripts/test_network_proxy.py
```python
import socket
import threading
import sys
from client_sim.models import *
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
import time
ERROR_RSV = ""
ERROR_CMD = ""
def dolog(fn, step, *txt):
l = Log.objects.create(function=fn, step=step, log=",".join(map(str, txt)))
l.save()
def handle(buffer):
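# Hook for inspecting or rewriting proxied data; currently passes the buffer through unchanged.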
return buffer
def transfer(src, dst):
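# Pump data from src to dst until src stops sending, then close both sockets; each direction of a proxied connection runs one of these in its own thread.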
src_name = src.getsockname()
src_address = src_name[0]
src_port = src_name[1]
dst_name = dst.getsockname()
dst_address = dst_name[0]
dst_port = dst_name[1]
print("[+] Starting transfer [%s:%d] => [%s:%d]" % (src_name, src_port, dst_name, dst_port))
while True:
buffer = src.recv(0x1000)
if not buffer:
print("[-] No data received! Breaking...")
break
# print "[+] %s:%d => %s:%d [%s]" % (src_address, src_port, dst_address, dst_port, repr(buffer))
print("[+] %s:%d => %s:%d => Length : [%d]" % (src_address, src_port, dst_address, dst_port, len(buffer)))
dst.send(handle(buffer))
print("[+] Closing connecions! [%s:%d]" % (src_address, src_port))
src.close()
print("[+] Closing connecions! [%s:%d]" % (dst_address, dst_port))
dst.close()
SOCKS_VERSION = 5
ERROR_VERSION = "[-] Client version error!"
ERROR_METHOD = "[-] Client method error!"
# ALLOWED_METHOD = [0, 2]
ALLOWED_METHOD = [0]
def socks_selection(socket):
client_version = ord(socket.recv(1))
print("[+] client version : %d" % (client_version))
if not client_version == SOCKS_VERSION:
socket.shutdown(socket.SHUT_RDWR)
socket.close()
return (False, ERROR_VERSION)
support_method_number = ord(socket.recv(1))
print("[+] Client Supported method number : %d" % (support_method_number))
support_methods = []
for i in range(support_method_number):
method = ord(socket.recv(1))
print("[+] Client Method : %d" % (method))
support_methods.append(method)
selected_method = None
for method in ALLOWED_METHOD:
if method in support_methods:
selected_method = 0
if selected_method == None:
socket.shutdown(socket.SHUT_RDWR)
socket.close()
return (False, ERROR_METHOD)
print("[+] Server select method : %d" % (selected_method))
response = chr(SOCKS_VERSION) + chr(selected_method)
socket.send(response.encode("utf-8"))
return (True, socket)
CONNECT = 1
BIND = 2
UDP_ASSOCIATE = 3
IPV4 = 1
DOMAINNAME = 3
IPV6 = 4
CONNECT_SUCCESS = 0
ERROR_ATYPE = "[-] Client address error!"
RSV = 0
BNDADDR = "\x00" * 4
BNDPORT = "\x00" * 2
def socks_request(local_socket):
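# SOCKS5 request: parse the CONNECT command and target address (IPv4, domain name, or IPv6), open the remote connection, send the success reply, and start the two transfer threads; BIND and UDP ASSOCIATE are not implemented.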
client_version = ord(local_socket.recv(1))
print("[+] client version : %d" % (client_version))
if not client_version == SOCKS_VERSION:
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_VERSION)
cmd = ord(local_socket.recv(1))
if cmd == CONNECT:
print("[+] CONNECT request from client")
rsv = ord(local_socket.recv(1))
if rsv != 0:
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_RSV)
atype = ord(local_socket.recv(1))
if atype == IPV4:
# dst_address = ("".join(["%d." % (ord(i)) for i in local_socket.recv(4)]))[0:-1]
dst_address = socket.inet_ntoa(local_socket.recv(4))
print("[+] IPv4 : %s" % (dst_address))
dst_port = ord(local_socket.recv(1)) * 0x100 + ord(local_socket.recv(1))
print("[+] Port : %s" % (dst_port))
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
print("[+] Connecting : %s:%s" % (dst_address, dst_port))
remote_socket.connect((dst_address, dst_port))
response = ""
response += chr(SOCKS_VERSION)
response += chr(CONNECT_SUCCESS)
response += chr(RSV)
response += chr(IPV4)
response += BNDADDR
response += BNDPORT
local_socket.send(response.encode("utf-8"))
print("[+] Tunnel connected! Tranfering data...")
r = threading.Thread(target=transfer, args=(
local_socket, remote_socket))
r.start()
s = threading.Thread(target=transfer, args=(
remote_socket, local_socket))
s.start()
return (True, (local_socket, remote_socket))
except socket.error as e:
print(e)
remote_socket.shutdown(socket.SHUT_RDWR)
remote_socket.close()
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
elif atype == DOMAINNAME:
domainname_length = ord(local_socket.recv(1))
domainname = ""
for i in range(domainname_length):
domainname += local_socket.recv(1).decode("utf-8")
print("[+] Domain name : %s" % (domainname))
dst_port = ord(local_socket.recv(1)) * 0x100 + ord(local_socket.recv(1))
print("[+] Port : %s" % (dst_port))
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
print("[+] Connecting : %s:%s" % (domainname, dst_port))
remote_socket.connect((domainname, dst_port))
response = ""
response += chr(SOCKS_VERSION)
response += chr(CONNECT_SUCCESS)
response += chr(RSV)
response += chr(IPV4)
response += BNDADDR
response += BNDPORT
local_socket.send(response.encode("utf-8"))
print("[+] Tunnel connected! Tranfering data...")
r = threading.Thread(target=transfer, args=(
local_socket, remote_socket))
r.start()
s = threading.Thread(target=transfer, args=(
remote_socket, local_socket))
s.start()
return (True, (local_socket, remote_socket))
except socket.error as e:
print(e)
remote_socket.shutdown(socket.SHUT_RDWR)
remote_socket.close()
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
elif atype == IPV6:
dst_address = socket.inet_ntop(socket.AF_INET6, local_socket.recv(16))
print("[+] IPv6 : %s" % (dst_address))
dst_port = ord(local_socket.recv(1)) * 0x100 + ord(local_socket.recv(1))
print("[+] Port : %s" % (dst_port))
remote_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
remote_socket.connect((dst_address, dst_port))
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_ATYPE)
else:
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_ATYPE)
elif cmd == BIND:
# TODO
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_CMD)
elif cmd == UDP_ASSOCIATE:
# TODO
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_CMD)
else:
local_socket.shutdown(socket.SHUT_RDWR)
local_socket.close()
return (False, ERROR_CMD)
return (True, local_socket)
def server(local_host, local_port, max_connection):
try:
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((local_host, local_port))
server_socket.listen(max_connection)
print('[+] Server started [%s:%d]' % (local_host, local_port))
while True:
local_socket, local_address = server_socket.accept()
print('[+] Detect connection from [%s:%s]' % (local_address[0], local_address[1]))
result = socks_selection(local_socket)
if not result[0]:
print("[-] socks selection error!")
break
result = socks_request(result[1])
if not result[0]:
print("[-] socks request error!")
break
# local_socket, remote_socket = result[1]
# TODO : loop all socket to close...
print("[+] Releasing resources...")
local_socket.close()
print("[+] Closing server...")
server_socket.close()
print("[+] Server shuted down!")
except KeyboardInterrupt:
print(' Ctrl-C, stopping server')
try:
remote_socket.close()
except:
pass
try:
local_socket.close()
except:
pass
try:
server_socket.close()
except:
pass
return
def main():
LOCAL_HOST = '0.0.0.0'
LOCAL_PORT = int(9011)
#REMOTE_HOST = sys.argv[3]
#REMOTE_PORT = int(sys.argv[4])
MAX_CONNECTION = 0x10
server(LOCAL_HOST, LOCAL_PORT, MAX_CONNECTION)
# Enable the job scheduler to run schedule jobs
cron = BackgroundScheduler()
# Explicitly kick off the background thread
cron.start()
cron.remove_all_jobs()
job0 = cron.add_job(main)
# Shutdown your cron thread if the web process is stopped
atexit.register(lambda: cron.shutdown(wait=False))
if __name__ == '__main__':
main()
``` |
{
"source": "joshand/dashboard-api-python",
"score": 2
} |
#### File: dashboard-api-python/generator/generate_snippets.py
```python
import json
import os
import re
import shutil
import sys
import requests
from jinja2 import Template
CALL_TEMPLATE = Template('''import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
{{ parameter_assignments }}
response = dashboard.{{ section }}.{{ operation}}({{ parameters }})
print(response)
''')
REVERSE_PAGINATION = ['getNetworkEvents', 'getOrganizationConfigurationChanges']
# Helper function to convert camel case parameter name to snake case
def snakify(param):
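# e.g. 'perPage' -> 'per_page', 'networkId' -> 'network_id'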
ret = ''
for s in param:
if s.islower():
ret += s
elif s == '_':
ret += '_'
else:
ret += '_' + s.lower()
return ret
# Helper function to return pagination parameters depending on endpoint
def generate_pagination_parameters(operation):
ret = {
'total_pages': {
'type': 'integer or string',
'description': 'total number of pages to retrieve, -1 or "all" for all pages',
},
'direction': {
'type': 'string',
'description': 'direction to paginate, either "next" or "prev" (default) page' if operation in
REVERSE_PAGINATION else 'direction to paginate, either "next" (default) or "prev" page',
}
}
return ret
# Helper function to return parameters within OAS spec, optionally based on list of input filters
def parse_params(operation, parameters, param_filters=[]):
if parameters is None:
return {}
# Create dict with information on endpoint's parameters
params = {}
for p in parameters:
name = p['name']
if 'schema' in p:
keys = p['schema']['properties']
for k in keys:
if 'required' in p['schema'] and k in p['schema']['required']:
params[k] = {'required': True}
else:
params[k] = {'required': False}
params[k]['in'] = p['in']
params[k]['type'] = keys[k]['type']
params[k]['description'] = keys[k]['description']
if 'enum' in keys[k]:
params[k]['enum'] = keys[k]['enum']
if 'example' in p['schema'] and k in p['schema']['example']:
params[k]['example'] = p['schema']['example'][k]
elif 'required' in p and p['required']:
params[name] = {'required': True}
params[name]['in'] = p['in']
params[name]['type'] = p['type']
if 'description' in p:
params[name]['description'] = p['description']
else:
params[name]['description'] = '(required)'
if 'enum' in p:
params[name]['enum'] = p['enum']
else:
params[name] = {'required': False}
params[name]['in'] = p['in']
params[name]['type'] = p['type']
params[name]['description'] = p['description']
if 'enum' in p:
params[name]['enum'] = p['enum']
# Add custom library parameters to handle pagination
if 'perPage' in params:
params.update(generate_pagination_parameters(operation))
# Return parameters based on matching input filters
if not param_filters:
return params
else:
ret = {}
if 'required' in param_filters:
ret.update({k: v for k, v in params.items() if 'required' in v and v['required']})
if 'pagination' in param_filters:
ret.update(generate_pagination_parameters(operation) if 'perPage' in params else {})
if 'optional' in param_filters:
ret.update({k: v for k, v in params.items() if 'required' in v and not v['required']})
if 'path' in param_filters:
ret.update({k: v for k, v in params.items() if 'in' in v and v['in'] == 'path'})
if 'query' in param_filters:
ret.update({k: v for k, v in params.items() if 'in' in v and v['in'] == 'query'})
if 'body' in param_filters:
ret.update({k: v for k, v in params.items() if 'in' in v and v['in'] == 'body'})
if 'array' in param_filters:
ret.update({k: v for k, v in params.items() if 'in' in v and v['type'] == 'array'})
if 'enum' in param_filters:
ret.update({k: v for k, v in params.items() if 'enum' in v})
return ret
# Generate text for parameter assignments
def process_assignments(parameters):
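# Emit one "snake_case_name = placeholder" line per parameter, using an empty value of the right type ([], 0.0, 0, False, {}, '') or the example value when one is given.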
text = '\n'
for k, v in parameters.items():
param_name = snakify(k)
if param_name == 'id':
param_name = 'id_'
if v == 'list':
text += f'{param_name} = []\n'
elif v == 'float':
text += f'{param_name} = 0.0\n'
elif v == 'int':
text += f'{param_name} = 0\n'
elif v == 'bool':
text += f'{param_name} = False\n'
elif v == 'dict':
text += f'{param_name} = {{}}\n'
elif v == 'str':
text += f'{param_name} = \'\'\n'
else:
if type(v) == str:
value = f'\'{v}\''
else:
value = v
text += f'{param_name} = {value}\n'
return text
def main():
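# Pull the latest OpenAPI spec, group endpoints by their first tag (the scope used for the library section), and write one runnable snippet per operationId into code_snippets/.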
# Get latest OpenAPI specification
spec = requests.get('https://api.meraki.com/api/v1/openapiSpec').json()
# Only care about the first 10 tags, which are the 10 scopes for organizations, networks, devices, & 7 products
# scopes = ['organizations', 'networks', 'devices',
# 'appliance', 'camera', 'cellularGateway', 'insight', 'sm', 'switch', 'wireless']
tags = spec['tags']
paths = spec['paths']
scopes = {tag['name']: {} for tag in tags[:10]}
# Organize data
operations = []
for path, methods in paths.items():
for method in methods:
endpoint = paths[path][method]
tags = endpoint['tags']
operation = endpoint['operationId']
operations.append(operation)
scope = tags[0]
if path not in scopes[scope]:
scopes[scope][path] = {method: endpoint}
else:
scopes[scope][path][method] = endpoint
# Generate API libraries
for scope in scopes:
print(f'...generating {scope}')
section = scopes[scope]
for path, methods in section.items():
for method, endpoint in methods.items():
# Get metadata
tags = endpoint['tags']
operation = endpoint['operationId']
description = endpoint['summary']
parameters = endpoint['parameters'] if 'parameters' in endpoint else None
responses = endpoint['responses'] # not actually used here for library generation
required = {}
optional = {}
if parameters:
if 'perPage' in parse_params(operation, parameters):
pagination = True
else:
pagination = False
for p, values in parse_params(operation, parameters, 'required').items():
if 'example' in values:
required[p] = values['example']
elif p == 'organizationId':
required[p] = '549236'
elif p == 'networkId':
required[p] = 'L_646829496481105433' # DevNet Sandbox ALWAYS ON network @ https://n149.meraki.com/o/-t35Mb/manage/organization/overview
elif p == 'serial':
required[p] = 'Q2QN-9J8L-SLPD'
elif values['type'] == 'array':
required[p] = 'list'
elif values['type'] == 'number':
required[p] = 'float'
elif values['type'] == 'integer':
required[p] = 'int'
elif values['type'] == 'boolean':
required[p] = 'bool'
elif values['type'] == 'object':
required[p] = 'dict'
elif values['type'] == 'string':
required[p] = 'str'
else:
sys.exit(p, values)
if pagination:
if operation not in REVERSE_PAGINATION:
optional['total_pages'] = 'all'
else:
optional['total_pages'] = 3
for p, values in parse_params(operation, parameters, 'optional').items():
if 'example' in values:
optional[p] = values['example']
if operation == 'createNetworkGroupPolicy':
print(required)
print(optional)
if 'code_snippets' not in os.listdir():
os.mkdir('code_snippets')
with open(f'code_snippets/{operation}.py', 'w', encoding='utf-8') as fp:
if required.items():
parameters_text = '\n '
for k, v in required.items():
param_name = snakify(k)
if param_name == 'id':
param_name = 'id_'
parameters_text += f'{param_name}, '
for k, v in optional.items():
if k == 'total_pages' and v == 'all':
parameters_text += f'total_pages=\'all\''
elif k == 'total_pages' and v == 1:
parameters_text += f'total_pages=1'
elif type(v) == str:
parameters_text += f'\n {k}=\'{v}\', '
else:
parameters_text += f'\n {k}={v}, '
if parameters_text[-2:] == ', ':
parameters_text = parameters_text[:-2]
parameters_text += '\n'
else:
parameters_text = ''
fp.write(
CALL_TEMPLATE.render(
parameter_assignments=process_assignments(required),
section=scope,
operation=operation,
parameters=parameters_text,
)
)
if __name__ == '__main__':
main()
``` |
{
"source": "joshanderson-kw/angel_system",
"score": 3
} |
#### File: python/angel_utils/conversion.py
```python
from typing import Dict
from typing import Hashable
from typing import Iterable
from typing import List
from typing import Tuple
import numpy as np
from angel_msgs.msg import ObjectDetection2dSet
from smqtk_detection.utils.bbox import AxisAlignedBoundingBox
def from_detect_image_objects_result(
detections: Iterable[Tuple[AxisAlignedBoundingBox, Dict[Hashable, float]]],
detection_threshold: float = 0,
) -> ObjectDetection2dSet:
"""
Convert an iterable of detection results from a smqtk-detection
DetectImageObjects algorithm instance into a new ObjectDetection2dSet
message.
This function does *not* touch the `header` or `source_stamp` fields of the
ObjectDetection2dSet message output. The user of the instance should
populate those fields appropriately according to the context.
:param detections: Iterable of detection prediction results.
:param detection_threshold: Do not include detections whose maximum
confidence score is less than this threshold value.
:returns: New ObjectDetection2dSet instance containing the detections.
"""
# We'll be taking multiple passes over detections, so make sure it is
# expanded.
detections = tuple(detections)
# Aggregate all detections, create "master set" of labels, ordered
# alphabetically for determinism.
label_union = set()
for _, det_preds in detections:
label_union.update(det_preds.keys())
label_list = sorted(label_union)
# List of (left, top, right, bottom) quadruples per detection.
# Should end up with shape [n_detections x 4] after threshold filtering.
det_lrtb = []
# Matrix of detection confidences
# Should end up with shape [n_detections x n_labels] after threshold
# filtering.
det_confidence = []
for i, (det_bbox, det_preds) in enumerate(detections):
# Get predicted confidences in the order determined above, filling in
# 0's for labels not present in this particular prediction.
confidence_vec = [det_preds.get(label, 0.0) for label in label_list]
# Skip detection if the maximally confident class is less than
# our confidence threshold. If "background" or equivalent classes
# are included in the chosen detector, then every class may be
# output...
max_conf = np.max(confidence_vec)
if max_conf < detection_threshold:
continue
det_confidence.append(confidence_vec)
det_lrtb.append((*det_bbox.min_vertex, *det_bbox.max_vertex))
assert len(det_lrtb) == len(det_confidence)
n_detections = len(det_confidence)
# If there are no detections post-filtering, empty out the label vec since
# there will be nothing in this message to refer to it.
if n_detections == 0:
label_list = []
msg = ObjectDetection2dSet()
msg.label_vec = label_list
msg.num_detections = n_detections
if n_detections > 0:
bounds_mat = np.asarray(det_lrtb, dtype=np.float32).T
msg.left = bounds_mat[0].tolist()
msg.top = bounds_mat[1].tolist()
msg.right = bounds_mat[2].tolist()
msg.bottom = bounds_mat[3].tolist()
msg.label_confidences = np.asarray(det_confidence, dtype=np.float64).ravel().tolist()
return msg
def to_confidence_matrix(msg: ObjectDetection2dSet) -> np.ndarray:
"""
Get the detection predicted confidences as a 2D matrix.
:param msg: Message to get the matrix confidences from.
:return: New numpy ndarray of 2 dimensions with shape [nDets x nClasses].
"""
return (
np.asarray(msg.label_confidences)
.reshape((msg.num_detections, len(msg.label_vec)))
)
``` |
{
"source": "joshanderson-kw/SMQTK-Classifier",
"score": 2
} |
#### File: impls/classify_descriptor_supervised/sklearn_svm.py
```python
import collections
import logging
import pickle
from typing import Any, Dict, Hashable, Iterable, Iterator, Mapping, Optional, Sequence, Union
import warnings
import numpy as np
from smqtk_dataprovider import from_uri
from smqtk_descriptors import DescriptorElement
from smqtk_classifier.interfaces.classify_descriptor_supervised import ClassifyDescriptorSupervised
LOG = logging.getLogger(__name__)
try:
# noinspection PyPackageRequirements
import scipy.stats # type: ignore
except ImportError:
warnings.warn(
"scipy.stats not importable: SkLearnSvmClassifier will not be usable."
)
scipy = None
try:
from sklearn import svm
except ImportError:
warnings.warn(
"svm not importable: SkLearnSvmClassifier will not be usable."
)
svm = None
class SkLearnSvmClassifier (ClassifyDescriptorSupervised):
"""
Classifier that wraps the SkLearn SVM (Support Vector Machine)
SVC (C-Support Vector Classification) module.
Model file paths are optional. If they are given and the file(s) exist,
we will load them. If they do not, we treat the path(s) as the output
path(s) for saving a model after calling ``train``. If this is None
(default), no model is loaded nor output via training, thus any model
trained will only exist in memory during the lifetime of this instance.
:param svm_model_uri: Path to the model file.
:param C: Regularization parameter passed to SkLearn SVM SVC model.
:param kernel: Kernel type passed to SkLearn SVM SVC model.
:param probability: Whether to enable probability estimates or not.
:param calculate_class_weights: Whether to manually calculate the
class weights to be passed to the SVM model or not.
Defaults to true. If false, all classes will be given equal weight.
:param normalize: Normalize input vectors to training and
classification methods using ``numpy.linalg.norm``. This may either
be ``None``, disabling normalization, or any valid value that
could be passed to the ``ord`` parameter in ``numpy.linalg.norm``
for 1D arrays. This is ``None`` by default (no normalization).
"""
# noinspection PyDefaultArgument
def __init__(
self,
svm_model_uri: Optional[str] = None,
C: float = 2.0, # Regularization parameter
kernel: str = 'linear', # Kernel type
probability: bool = True,  # Enable probability estimates
calculate_class_weights: bool = True, # Enable calculation of class weights
normalize: Optional[Union[int, float, str]] = None,
):
super(SkLearnSvmClassifier, self).__init__()
self.svm_model_uri = svm_model_uri
# Elements will be None if input URI is None
#: :type: None | smqtk.representation.DataElement
self.svm_model_elem = \
svm_model_uri and from_uri(svm_model_uri)
self.C = C
self.kernel = kernel
self.probability = probability
self.calculate_class_weights = calculate_class_weights
self.normalize = normalize
# Validate normalization parameter by trying it on a random vector
if normalize is not None:
self._norm_vector(np.random.rand(8))
# generated parameters
self.svm_model: Optional[svm.SVC] = None
self._reload_model()
@classmethod
def is_usable(cls) -> bool:
return None not in {scipy, svm}
def get_config(self) -> Dict[str, Any]:
return {
"svm_model_uri": self.svm_model_uri,
"C": self.C,
"kernel": self.kernel,
"probability": self.probability,
"calculate_class_weights": self.calculate_class_weights,
"normalize": self.normalize,
}
def _reload_model(self) -> None:
"""
Reload SVM model from configured file path.
"""
if self.svm_model_elem and not self.svm_model_elem.is_empty():
svm_model_tmp_fp = self.svm_model_elem.write_temp()
with open(svm_model_tmp_fp, 'rb') as f:
self.svm_model = pickle.load(f)
self.svm_model_elem.clean_temp()
def _norm_vector(self, v: np.ndarray) -> np.ndarray:
"""
Class standard array normalization. Normalized along max dimension (a=0
for a 1D array, a=1 for a 2D array, etc.).
:param v: Vector to normalize
:return: Returns the normalized version of input array ``v``.
"""
if self.normalize is not None:
n = np.linalg.norm(v, self.normalize, v.ndim - 1,
keepdims=True)
# replace 0's with 1's, preventing div-by-zero
n[n == 0.] = 1.
return v / n
# Normalization off
return v
def has_model(self) -> bool:
"""
:return: If this instance currently has a model loaded. If no model is
present, classification of descriptors cannot happen.
:rtype: bool
"""
return self.svm_model is not None
def _train(
self,
class_examples: Mapping[Hashable, Iterable[DescriptorElement]]
) -> None:
train_labels = []
train_vectors = []
train_group_sizes: Dict = {} # number of examples per class
# Making SVM label assignment deterministic to lexicographical order
# of the type repr.
# -- Can't specifically guarantee that dict key types will all support
# less-than operator, however we can always get some kind of repr
# which is a string which does support less-than. In the common case
# keys will be strings and ints, but this "should" handle more
# exotic cases, at least for the purpose of ordering keys reasonably
# deterministically.
for i, l in enumerate(sorted(class_examples, key=lambda e: str(e))):
# requires a sequence, so making the iterable ``g`` a tuple
g = class_examples[l]
if not isinstance(g, collections.abc.Sequence):
LOG.debug(' (expanding iterable into sequence)')
g = tuple(g)
train_group_sizes[l] = float(len(g))
x = np.array(DescriptorElement.get_many_vectors(g))
x = self._norm_vector(x)
train_labels.extend([l] * x.shape[0])
train_vectors.extend(x)
del g, x
assert len(train_labels) == len(train_vectors), \
"Count mismatch between parallel labels and descriptor vectors" \
"(%d != %d)" \
% (len(train_labels), len(train_vectors))
# Calculate class weights
weights = None
if self.calculate_class_weights:
weights = {}
# (john.moeller): The weighting should probably be the geometric
# mean of the number of examples over the classes divided by the
# number of examples for the current class.
gmean = scipy.stats.gmean(list(train_group_sizes.values()))
for i, g in enumerate(train_group_sizes):
w = gmean / train_group_sizes[g]
weights[g] = w
self.svm_model = svm.SVC(C=self.C,
kernel=self.kernel,
probability=self.probability,
class_weight=weights)
LOG.debug("Training SVM model")
self.svm_model.fit(train_vectors, train_labels)
if self.svm_model_elem and self.svm_model_elem.writable():
LOG.debug("Saving model to element (%s)", self.svm_model_elem)
self.svm_model_elem.set_bytes(pickle.dumps(self.svm_model))
def get_labels(self) -> Sequence[Hashable]:
if self.svm_model is not None:
return list(self.svm_model.classes_)
else:
raise RuntimeError("No model loaded")
def _classify_arrays(self, array_iter: Union[np.ndarray, Iterable[np.ndarray]]) -> Iterator[Dict[Hashable, float]]:
if self.svm_model is None:
raise RuntimeError("No SVM model present for classification")
# Dump descriptors into a matrix for normalization and use in
# prediction.
vec_mat = np.array(list(array_iter))
vec_mat = self._norm_vector(vec_mat)
svm_model_labels = self.get_labels()
if self.svm_model.probability:
proba_mat = self.svm_model.predict_proba(vec_mat)
for proba in proba_mat:
yield dict(zip(svm_model_labels, proba))
else:
c_base = {label: 0.0 for label in svm_model_labels}
proba_mat = self.svm_model.predict(vec_mat)
for p in proba_mat:
c = dict(c_base)
c[p] = 1.0
yield c
```
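The class-weight logic in `_train` above is self-contained enough to illustrate on its own: each label's weight is the geometric mean of all class sizes divided by that class's size, so under-represented classes are weighted up before being handed to `svm.SVC(class_weight=...)`. A minimal sketch of just that step (the class sizes below are made up for illustration):
```python
import scipy.stats

# Hypothetical per-class example counts, standing in for train_group_sizes.
train_group_sizes = {"cat": 120.0, "dog": 30.0, "bird": 10.0}

# Geometric mean of all class sizes.
gmean = scipy.stats.gmean(list(train_group_sizes.values()))

# Weight per class: gmean / class_size, so smaller classes get larger weights.
weights = {label: gmean / size for label, size in train_group_sizes.items()}

print(weights)  # roughly {'cat': 0.28, 'dog': 1.10, 'bird': 3.30}
```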
#### File: tests/interfaces/test_classify_image_supervised.py
```python
from typing import Any, Dict, Hashable, Iterator, List, Mapping, Sequence
import unittest
import unittest.mock as mock
import numpy as np
from smqtk_classifier import ClassifyImageSupervised
from smqtk_classifier.interfaces.classification_element import CLASSIFICATION_DICT_T
from smqtk_classifier.interfaces.classify_image import IMAGE_ITER_T
from smqtk_classifier.exceptions import ExistingModelError
class DummySupervisedClassifier (ClassifyImageSupervised):
EXPECTED_LABELS = ['constant']
EXPECTED_HAS_MODEL = False
def get_config(self) -> Dict[str, Any]: ...
def get_labels(self) -> Sequence[Hashable]: ...
def classify_images(self, img_iter: IMAGE_ITER_T) -> Iterator[CLASSIFICATION_DICT_T]: ...
def has_model(self) -> bool:
return self.EXPECTED_HAS_MODEL
def _train(
self,
class_examples: Mapping[Hashable, IMAGE_ITER_T]
) -> None: ...
class TestSupervisedClassifierAbstractClass (unittest.TestCase):
test_classifier: DummySupervisedClassifier
@classmethod
def setUpClass(cls) -> None:
cls.test_classifier = DummySupervisedClassifier()
def test_train_hasModel(self) -> None:
        # Calling the train method should fail when the class also reports
        # that it already has a model. It shouldn't matter what is passed to
        # the method (or whether anything is passed at all).
self.test_classifier.EXPECTED_HAS_MODEL = True
self.assertRaises(
ExistingModelError,
self.test_classifier.train, {}
)
#
# Testing train abstract function functionality. Method currently does not
# care what the value for labels are.
#
def test_train_noModel_noExamples(self) -> None:
self.test_classifier.EXPECTED_HAS_MODEL = False
self.assertRaises(
ValueError,
self.test_classifier.train, {}
)
def test_train_noModel_oneExample_classExamples(self) -> None:
self.test_classifier.EXPECTED_HAS_MODEL = False
input_class_examples = {
'label_1': [0, 1, 2],
}
self.assertRaises(
ValueError,
self.test_classifier.train, input_class_examples
)
def test_train_noModel_classExamples_only(self) -> None:
self.test_classifier.EXPECTED_HAS_MODEL = False
input_class_examples: Dict[Hashable, List[np.ndarray]] = {
'label_1': [mock.Mock(spec=np.ndarray)],
'label_2': [mock.Mock(spec=np.ndarray)],
'label_3': [mock.Mock(spec=np.ndarray)],
'special symbolLabel +here': [mock.Mock(spec=np.ndarray)],
}
# Intentionally not passing np.ndarray's here.
self.test_classifier._train = mock.MagicMock() # type: ignore
self.test_classifier.train(class_examples=input_class_examples)
self.test_classifier._train.assert_called_once_with(
input_class_examples
)
``` |
{
"source": "joshanderson-kw/SMQTK-Dataprovider",
"score": 2
} |
#### File: impls/data_element/psql.py
```python
import logging
import hashlib
from threading import RLock
from typing import Dict, Optional
from smqtk_dataprovider import DataElement
from smqtk_dataprovider.utils.postgres import (
norm_psql_cmd_string,
PsqlConnectionHelper,
)
# Try to import required modules
try:
import psycopg2
import psycopg2.extensions
except ImportError:
psycopg2 = None
LOG = logging.getLogger(__name__)
# Lock for data element create-table functionality
GLOBAL_PSQL_TABLE_CREATE_RLOCK = RLock()
class PostgresDataElement (DataElement): # lgtm [py/missing-equals]
"""
Data element bytes stored in PostgreSQL database.
    Storage table should have four columns for the following components:
        - element ID (primary key)
        - data SHA1
        - data content-type / MIMETYPE
        - data bytes
    Efficient connection pooling may be achieved via external utilities like
    PgBouncer.
    Due to the use of the "ON CONFLICT" clause in upserting data, this
    implementation requires PostgreSQL version 9.5 or greater.
"""
# SHA1 checksum of 0-length data (empty bytes)
EMPTY_SHA = hashlib.sha1(b'').hexdigest()
class CommandTemplates (object):
""" Encapsulation of command templates. """
# Upsert table for storage if desired
#
# Format params:
# - table_name
# - id_col
# - sha1_col
# - mime_col
# - byte_col
UPSERT_TABLE = norm_psql_cmd_string("""
CREATE TABLE IF NOT EXISTS {table_name:s} (
{id_col:s} TEXT NOT NULL,
{sha1_col:s} TEXT NOT NULL,
{mime_col:s} TEXT NOT NULL,
{byte_col:s} BYTEA NOT NULL,
PRIMARY KEY ({id_col:s})
);
""")
# Select ``col`` for a given entry ID.
#
# Query Format params:
# - col
# - table_name
# - id_col
#
# Value params:
# - id_val
SELECT = norm_psql_cmd_string("""
SELECT {col:s}
FROM {table_name:s}
WHERE {id_col:s} = %(id_val)s
;
""")
# Upsert content-type/data for a uid
#
# Query Format params:
# - table_name
# - id_col
# - sha1_col
# - mime_col
# - byte_col
#
# Value params:
# - id_val
# - sha1_val
# - mime_val
# - byte_val
#
# SQL format from:
# https://hashrocket.com/blog/posts/upsert-records-with-postgresql-9-5
#
UPSERT_DATA = norm_psql_cmd_string("""
INSERT INTO {table_name:s} ({id_col:s}, {sha1_col:s}, {mime_col:s}, {byte_col:s})
VALUES ( %(id_val)s, %(sha1_val)s, %(mime_val)s, %(byte_val)s )
ON CONFLICT ({id_col:s})
DO UPDATE
SET ({sha1_col:s}, {mime_col:s}, {byte_col:s})
= (EXCLUDED.{sha1_col:s}, EXCLUDED.{mime_col:s}, EXCLUDED.{byte_col:s})
;
""")
# Same as ``UPSERT_DATA`` but does not set the mimetype on an update.
# This is meant to atomically update the byte data without changing the
# existing mimetype.
UPSERT_DATA_NO_MIME = norm_psql_cmd_string("""
INSERT INTO {table_name:s} ({id_col:s}, {sha1_col:s}, {mime_col:s}, {byte_col:s})
VALUES ( %(id_val)s, %(sha1_val)s, %(mime_val)s, %(byte_val)s )
ON CONFLICT ({id_col:s})
DO UPDATE
SET ({sha1_col:s}, {byte_col:s})
= (EXCLUDED.{sha1_col:s}, EXCLUDED.{byte_col:s})
;
""")
@classmethod
def is_usable(cls) -> bool:
if psycopg2 is None:
LOG.warning("Not usable. Requires the psycopg2 module.")
return False
return True
def __init__(
self,
element_id: str,
content_type: Optional[str] = None,
table_name: str = "psql_data_elements",
id_col: str = "id",
sha1_col: str = "sha1",
mime_col: str = "mime",
byte_col: str = "bytes",
db_name: str = "postgres",
db_host: Optional[str] = "/tmp",
db_port: Optional[int] = 5433,
db_user: Optional[str] = None,
db_pass: Optional[str] = None,
read_only: bool = False,
create_table: bool = True
):
"""
Create a new PostgreSQL-based data element.
        If the table mapped to the provided ``table_name`` already exists, we
        expect the provided columns to match the following types:
        - ``id_col`` is expected to be TEXT
        - ``sha1_col`` is expected to be TEXT
        - ``mime_col`` is expected to be TEXT
        - ``byte_col`` is expected to be BYTEA
        Default database connection parameters assume the use of a
non-default, non-postgres-user cluster where the current user's name is
equivalent to a valid role in the database.
:param element_id: ID to reference a specific data element row in the
table. This is required in the same way that a path is required to
point to a file on a filesystem.
:type element_id: str
:param content_type: Expected mime-type of byte data set to this
element. This only affects setting the mime-type field when setting
new bytes. ``content_type()`` will always reflect what is stored in
            the backend, or lack thereof.
If this mime-type differs from an existing stored value,
this mime-type will overwrite the stored value on the next call to
``set_bytes``. If this is None and there is no mime-type already
set in the database, no mime-type will be set on the next
``set_bytes`` call.
:type content_type: str | None
:param table_name: String label of the table in the database to interact
with.
:type table_name: str
:param id_col: Name of the element ID column in ``table_name``.
:type id_col: str
:param sha1_col: Name of the SHA1 column in ``table_name``.
:type sha1_col: str
:param mime_col: Name of the MIMETYPE column in ``table_name``.
:type mime_col: str
:param byte_col: Name of the column storing byte data in ``table_name``.
:type byte_col: str
:param db_host: Host address of the PostgreSQL server. If None, we
assume the server is on the local machine and use the UNIX socket.
This might be a required field on Windows machines (not tested yet).
:type db_host: str | None
:param db_port: Port the Postgres server is exposed on. If None, we
assume a default port (5433).
:type db_port: int | None
:param db_name: The name of the database to connect to.
:type db_name: str
:param db_user: Postgres user to connect as. If None, postgres
defaults to using the current accessing user account name on the
operating system.
:type db_user: str | None
:param db_pass: Password for the user we're connecting as. This may be
None if no password is to be used.
:type db_pass: str | None
:param read_only: Only allow reading of this data. Modification actions
will throw a ReadOnlyError exceptions.
:type read_only: bool
:param create_table: If this instance should try to create the storing
table before actions are performed against it. If the configured
user does not have sufficient permissions to create the table and it
does not currently exist, an exception will be raised.
:type create_table: bool
"""
super(PostgresDataElement, self).__init__()
if not isinstance(element_id, str):
raise ValueError("Element ID should be a string type for this "
"implementation. Database storage is typed.")
self._element_id = element_id
self._content_type = content_type
self._table_name = table_name
self._id_col = id_col
self._sha1_col = sha1_col
self._mime_col = mime_col
self._byte_col = byte_col
self._read_only = read_only
self._create_table = create_table
# itersize is hard-coded because a single-element perspective should
# only be retrieving one row at a time.
self._psql_helper = PsqlConnectionHelper(
db_name, db_host, db_port, db_user, db_pass, 10,
GLOBAL_PSQL_TABLE_CREATE_RLOCK
)
# Set table creation SQL in helper
if not self._read_only:
self._psql_helper.set_table_upsert_sql(
self.CommandTemplates.UPSERT_TABLE.format(
table_name=self._table_name,
id_col=self._id_col,
sha1_col=self._sha1_col,
mime_col=self._mime_col,
byte_col=byte_col,
)
)
def __repr__(self) -> str:
return "{:s}[id=\"{:s}\"]" \
.format(self.__class__.__name__, self._element_id)
def get_config(self) -> Dict:
"""
Return a JSON-compliant dictionary that could be passed to this class's
``from_config`` method to produce an instance with identical
configuration.
:return: JSON type compliant configuration dictionary.
:rtype: dict
"""
return {
"element_id": self._element_id,
"table_name": self._table_name,
"id_col": self._id_col,
"sha1_col": self._sha1_col,
"mime_col": self._mime_col,
"byte_col": self._byte_col,
"db_name": self._psql_helper.db_name,
"db_host": self._psql_helper.db_host,
"db_port": self._psql_helper.db_port,
"db_user": self._psql_helper.db_user,
"db_pass": self._psql_helper.db_pass,
"read_only": self._read_only,
"create_table": self._create_table,
}
def content_type(self) -> Optional[str]:
"""
:return: Standard type/subtype string for this data element, or None if
the content type is unknown.
:rtype: str or None
"""
q = self.CommandTemplates.SELECT.format(
col=self._mime_col,
table_name=self._table_name,
id_col=self._id_col,
)
v = dict(
id_val=self._element_id
)
def cb(cursor: psycopg2.extensions.cursor) -> None:
"""
:type cursor: psycopg2.extensions.cursor
"""
cursor.execute(q, v)
r = list(self._psql_helper.single_execute(cb, yield_result_rows=True))
if not r:
return None
elif len(r) > 1:
raise RuntimeError("Somehow found multiple entries for the same"
"element ID (there should only be one).")
return r[0][0]
def is_empty(self) -> bool:
"""
Check if this element contains no bytes.
The intent of this method is to quickly check if there is any data
behind this element, ideally without having to read all/any of the
underlying data.
:return: If this element contains 0 bytes.
:rtype: bool
"""
q = self.CommandTemplates.SELECT.format(
col="octet_length(%s)" % self._byte_col,
table_name=self._table_name,
id_col=self._id_col,
)
v = dict(
id_val=self._element_id
)
def cb(cursor: psycopg2.extensions.cursor) -> None:
"""
:type cursor: psycopg2.extensions.cursor
"""
cursor.execute(q, v)
r = list(self._psql_helper.single_execute(cb, yield_result_rows=True))
if not r:
            # No rows returned, meaning no entry for our element ID and no
# bytes stored.
return True
elif len(r) > 1:
raise RuntimeError("Somehow found multiple entries for the same"
"element ID (there should only be one).")
num_bytes = int(r[0][0])
if num_bytes == 0:
# There was an entry, but the number of bytes stored was zero.
return True
else:
# Non-zero number of bytes stored.
return False
def sha1(self) -> str:
"""
Get the SHA1 checksum of this element's binary content.
:return: SHA1 hex checksum of the data content.
:rtype: str
"""
q = self.CommandTemplates.SELECT.format(
col=self._sha1_col,
table_name=self._table_name,
id_col=self._id_col,
)
v = dict(
id_val=self._element_id,
)
def cb(cursor: psycopg2.extensions.cursor) -> None:
"""
:type cursor: psycopg2.extensions.cursor
"""
cursor.execute(q, v)
r = list(self._psql_helper.single_execute(cb, yield_result_rows=True))
if not r:
# no rows for element ID, so no bytes. Return SHA1 of empty string
return self.EMPTY_SHA
return r[0][0]
def get_bytes(self) -> bytes:
"""
:return: Get the bytes for this data element.
:rtype: bytes
"""
q = self.CommandTemplates.SELECT.format(
col=self._byte_col,
table_name=self._table_name,
id_col=self._id_col,
)
v = dict(
id_val=self._element_id
)
def cb(cursor: psycopg2.extensions.cursor) -> None:
"""
:type cursor: psycopg2.extensions.cursor
"""
cursor.execute(q, v)
r = list(self._psql_helper.single_execute(cb, yield_result_rows=True))
if not r or len(r[0][0]) == 0:
# No returned rows for element ID or if no bytes are stored.
return bytes()
else:
return bytes(r[0][0])
def writable(self) -> bool:
"""
:return: if this instance supports setting bytes.
:rtype: bool
"""
return not self._read_only
def set_bytes(self, b: bytes) -> None:
"""
Set bytes to this data element.
Not all implementations may support setting bytes (check ``writable``
method return).
This base abstract method should be called by sub-class implementations
first. We check for mutability based on ``writable()`` method return.
:param b: bytes to set.
        :type b: bytes
:raises ReadOnlyError: This data element can only be read from / does
not support writing.
"""
super(PostgresDataElement, self).set_bytes(b)
b_sha1 = hashlib.sha1(b).hexdigest()
# TODO: Fallback to ``content_type()`` return if none provided in self.
if self._content_type:
# We have a content/mime type override as specified at element
# construction.
b_mimetype = self._content_type
q_tmpl = self.CommandTemplates.UPSERT_DATA
else:
# Leave the mimetype alone or set an empty mimetype (none specified
# at construction).
b_mimetype = ""
q_tmpl = self.CommandTemplates.UPSERT_DATA_NO_MIME
q = q_tmpl.format(
table_name=self._table_name,
id_col=self._id_col,
sha1_col=self._sha1_col,
mime_col=self._mime_col,
byte_col=self._byte_col,
)
v = dict(
id_val=self._element_id,
sha1_val=b_sha1,
mime_val=b_mimetype,
byte_val=psycopg2.Binary(b)
)
def cb(cursor: psycopg2.extensions.cursor) -> None:
"""
:type cursor: psycopg2.extensions.cursor
"""
# TODO: Could be smart here and only update if content-type/byte
# data differs while keeping a row-lock between queries.
cursor.execute(q, v)
list(self._psql_helper.single_execute(cb))
```
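A minimal usage sketch for the element above, assuming the import path matches the package layout implied by the file location and that a PostgreSQL 9.5+ server is reachable with the default connection settings shown in the constructor:
```python
from smqtk_dataprovider.impls.data_element.psql import PostgresDataElement

e = PostgresDataElement(
    element_id="example-element-1",
    content_type="text/plain",
    table_name="psql_data_elements",
    db_name="postgres",
    db_host="/tmp",   # UNIX socket directory; use a hostname for TCP connections
    db_port=5433,
)

if e.writable():
    e.set_bytes(b"hello world")  # upserts the row, storing SHA1, mimetype and bytes

print(e.content_type())  # "text/plain"
print(e.sha1())          # SHA1 hex digest of b"hello world"
print(e.get_bytes())     # b"hello world"
```
Because the element ID is the table's primary key and writes go through the ON CONFLICT upsert, repeated `set_bytes` calls on the same `element_id` update the existing row rather than inserting a new one.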
#### File: impls/data_set/kvstore_backed.py
```python
from typing import Any, Dict, Iterator, Set, Hashable, Type, TypeVar
from smqtk_dataprovider import (
DataElement,
DataSet,
KeyValueStore,
)
from smqtk_core.configuration import (
from_config_dict,
make_default_config,
to_config_dict
)
from smqtk_core.dict import merge_dict
from smqtk_dataprovider.impls.key_value_store.memory import MemoryKeyValueStore
DFLT_KVSTORE = MemoryKeyValueStore()
T = TypeVar("T", bound="KVSDataSet")
class KVSDataSet (DataSet):
"""
DataSet backed by a KeyValueStore implementation.
Since KeyValue stores should be able to contain arbitrary hashable keys and
arbitrary values, this leverages available implementations for the
KeyValueStore interface.
"""
@classmethod
def is_usable(cls) -> bool:
"""
This implementation is always usable.
:return: True
:rtype: bool
"""
# No external dependencies.
return True
@classmethod
def get_default_config(cls) -> Dict:
"""
Generate and return a default configuration dictionary for this class.
        It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(KVSDataSet, cls).get_default_config()
c['kvstore'] = merge_dict(
make_default_config(KeyValueStore.get_impls()),
to_config_dict(c['kvstore'])
)
return c
@classmethod
def from_config(
cls: Type[T],
config_dict: Dict,
merge_default: bool = True
) -> T:
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: KVSDataSet
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
# Convert KVStore config to instance for constructor.
kvs_inst = from_config_dict(config_dict['kvstore'],
KeyValueStore.get_impls())
config_dict['kvstore'] = kvs_inst
return super(KVSDataSet, cls).from_config(config_dict, False)
def __init__(self, kvstore: KeyValueStore = DFLT_KVSTORE):
"""
Create new instance.
        If no key-value store is provided, an empty in-memory implementation
instance is used.
:param kvstore: Backing key-value store instance.
:type kvstore: smqtk.representation.KeyValueStore
"""
super(KVSDataSet, self).__init__()
assert isinstance(kvstore, KeyValueStore), \
"Not constructed with a KeyValueStore instance."
self._kvstore = kvstore
def get_config(self) -> Dict[str, Any]:
return {
'kvstore': to_config_dict(self._kvstore)
}
def __iter__(self) -> Iterator[DataElement]:
for v in self._kvstore.values():
yield v
def count(self) -> int:
"""
:return: The number of data elements in this set.
:rtype: int
"""
return len(self._kvstore)
def uuids(self) -> Set[Hashable]:
"""
:return: A new set of uuids represented in this data set.
:rtype: set
"""
return set(self._kvstore.keys())
def has_uuid(self, uuid: Hashable) -> bool:
"""
Test if the given uuid refers to an element in this data set.
:param uuid: Unique ID to test for inclusion. This should match the
type that the set implementation expects or cares about.
:type uuid: collections.abc.Hashable
:return: True if the given uuid matches an element in this set, or
False if it does not.
:rtype: bool
"""
return self._kvstore.has(uuid)
def add_data(self, *elems: DataElement) -> None:
"""
Add the given data element(s) instance to this data set.
:param elems: Data element(s) to add
:type elems: smqtk.representation.DataElement
"""
d = {}
for e in elems:
if isinstance(e, DataElement):
d[e.uuid()] = e
else:
raise ValueError("Invalid element '%s'" % e)
self._kvstore.add_many(d)
def get_data(self, uuid: Hashable) -> DataElement:
"""
Get the data element the given uuid references, or raise an
exception if the uuid does not reference any element in this set.
:raises KeyError: If the given uuid does not refer to an element in
this data set.
:param uuid: The uuid of the element to retrieve.
:type uuid: collections.abc.Hashable
:return: The data element instance for the given uuid.
:rtype: smqtk.representation.DataElement
"""
return self._kvstore.get(uuid)
```
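A short sketch of the data set above paired with the in-memory key-value store it imports. The `KVSDataSet` and `DataMemoryElement` import paths are inferred from the file locations and are assumptions here; any concrete `DataElement` implementation would work the same way:
```python
from smqtk_dataprovider.impls.data_set.kvstore_backed import KVSDataSet
from smqtk_dataprovider.impls.key_value_store.memory import MemoryKeyValueStore
from smqtk_dataprovider.impls.data_element.memory import DataMemoryElement  # assumed impl

ds = KVSDataSet(kvstore=MemoryKeyValueStore())

e1 = DataMemoryElement(b"first element")
e2 = DataMemoryElement(b"second element")
ds.add_data(e1, e2)

print(ds.count())                          # 2
print(ds.has_uuid(e1.uuid()))              # True
print(ds.get_data(e1.uuid()).get_bytes())  # b"first element"
```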
#### File: representation/DataElement/test_from_uri.py
```python
from typing import Dict, Optional, Iterable
import unittest
from smqtk_dataprovider import DataElement, from_uri
from smqtk_dataprovider.exceptions import InvalidUriError
class UnresolvableElement (DataElement):
""" Does not implement from_uri, declaring no support for URI resolution """
def __repr__(self) -> str:
return super(UnresolvableElement, self).__repr__()
def get_config(self) -> Dict:
return {}
def content_type(self) -> None:
return None
def is_empty(self) -> bool:
pass
def get_bytes(self) -> bytes:
return bytes()
def set_bytes(self, b: bytes) -> None:
pass
def writable(self) -> bool:
pass
class ResolvableElement (DataElement):
@classmethod
def from_uri(cls, uri: str) -> "ResolvableElement":
"""
:type uri: str
:rtype: ResolvableElement
"""
if uri.startswith('resolvable://'):
return ResolvableElement()
raise InvalidUriError(uri, "Does not begin with 'resolvable://'.")
def __repr__(self) -> str:
return super(ResolvableElement, self).__repr__()
def get_config(self) -> Dict:
return {}
def content_type(self) -> Optional[str]:
return None
def is_empty(self) -> bool:
pass
def get_bytes(self) -> bytes:
return bytes()
def set_bytes(self, b: bytes) -> None:
pass
def writable(self) -> bool:
pass
class TestDataElementHighLevelFromUri (unittest.TestCase):
def test_no_classes(self) -> None:
def impl_generator() -> Dict:
return {}
self.assertRaises(
InvalidUriError,
from_uri,
'whatever',
impl_generator
)
def test_no_resolvable_options(self) -> None:
"""
        When no DataElement implementations provide an implementation for
        the ``from_uri`` class method.
"""
def impl_generator() -> Iterable:
return {UnresolvableElement}
self.assertRaises(
InvalidUriError,
from_uri,
'something',
impl_generator
)
def test_one_resolvable_option(self) -> None:
"""
When at least one plugin can resolve a URI
"""
def impl_generator() -> Iterable:
return {UnresolvableElement, ResolvableElement}
# URI that can be resolved by ResolvableElement
self.assertIsInstance(
from_uri(
"resolvable://data",
impl_generator
),
ResolvableElement
)
# bad URI even though something can resolve it
self.assertRaises(
InvalidUriError,
from_uri,
'not_resolvable', impl_generator
)
```
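The tests above exercise the high-level `from_uri` helper: it asks each candidate `DataElement` class to resolve the URI and raises `InvalidUriError` when none can. A small sketch of the same pattern, assuming `from_uri` falls back to the installed implementations when no generator is passed and treating the `file://` URI as purely illustrative:
```python
from smqtk_dataprovider import from_uri
from smqtk_dataprovider.exceptions import InvalidUriError

try:
    elem = from_uri("file:///tmp/example.bin")
    print("Resolved to:", type(elem).__name__)
except InvalidUriError as ex:
    print("No registered DataElement implementation could resolve the URI:", ex)
```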
#### File: utils/file_utils/test_safe_create_dir.py
```python
import errno
import os
import unittest
import unittest.mock as mock
from smqtk_dataprovider.utils.file import safe_create_dir
class TestSafeCreateDir (unittest.TestCase):
@mock.patch('smqtk_dataprovider.utils.file.os.makedirs')
def test_noExists(self, mock_os_makedirs: mock.MagicMock) -> None:
dir_path = "/some/directory/somewhere"
p = safe_create_dir(dir_path)
self.assertTrue(mock_os_makedirs.called)
self.assertEqual(p, dir_path)
@mock.patch('smqtk_dataprovider.utils.file.os.path.exists')
@mock.patch('smqtk_dataprovider.utils.file.os.makedirs')
def test_existError_alreadyExists(
self,
mock_os_makedirs: mock.MagicMock,
mock_osp_exists: mock.MagicMock
) -> None:
mock_os_makedirs.side_effect = OSError(errno.EEXIST,
"Existing directory")
mock_osp_exists.return_value = True
dir_path = '/existing/dir'
p = safe_create_dir(dir_path)
self.assertTrue(mock_os_makedirs.called)
self.assertTrue(mock_osp_exists.called)
mock_osp_exists.assert_called_once_with(dir_path)
self.assertEqual(p, dir_path)
@mock.patch('smqtk_dataprovider.utils.file.os.path.exists')
@mock.patch('smqtk_dataprovider.utils.file.os.makedirs')
def test_existError_noExist(
self,
mock_os_makedirs: mock.MagicMock,
mock_osp_exists: mock.MagicMock
) -> None:
mock_os_makedirs.side_effect = OSError(errno.EEXIST,
"Existing directory")
mock_osp_exists.return_value = False
dir_path = '/some/dir'
self.assertRaises(OSError, safe_create_dir, dir_path)
mock_os_makedirs.assert_called_once_with(dir_path)
mock_osp_exists.assert_called_once_with(dir_path)
@mock.patch('smqtk_dataprovider.utils.file.os.path.exists')
@mock.patch('smqtk_dataprovider.utils.file.os.makedirs')
def test_otherOsError(
self,
mock_os_makedirs: mock.MagicMock,
mock_osp_exists: mock.MagicMock
) -> None:
mock_os_makedirs.side_effect = OSError(errno.EACCES,
"Permission Denied")
dir_path = '/some/dir'
self.assertRaises(OSError, safe_create_dir, dir_path)
mock_os_makedirs.assert_called_once_with(dir_path)
self.assertFalse(mock_osp_exists.called)
@mock.patch('smqtk_dataprovider.utils.file.os.makedirs')
def test_otherException(self, mock_os_makedirs: mock.MagicMock) -> None:
mock_os_makedirs.side_effect = RuntimeError("Some other exception")
dir_path = 'something'
self.assertRaises(RuntimeError, safe_create_dir, dir_path)
mock_os_makedirs.assert_called_once_with(os.path.abspath(dir_path))
``` |
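For completeness, the utility under test is typically used as a one-liner: it creates the directory (and any missing parents) if needed, tolerates the directory already existing, and returns the path. A minimal sketch:
```python
from smqtk_dataprovider.utils.file import safe_create_dir

out_dir = safe_create_dir("/tmp/smqtk_example/output")  # no error if it already exists
print(out_dir)
```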